diff --git a/api/graphite.go b/api/graphite.go index ba547525e7..a0d6db38ff 100644 --- a/api/graphite.go +++ b/api/graphite.go @@ -4,7 +4,6 @@ import ( "errors" "math" "net/http" - "sort" "strings" "sync" "time" @@ -204,7 +203,6 @@ func (s *Server) renderMetrics(ctx *middleware.Context, request models.GraphiteR ctx.Error(http.StatusBadRequest, err.Error()) return } - sort.Sort(models.SeriesByTarget(out)) switch request.Format { case "msgp": diff --git a/docs/graphite.md b/docs/graphite.md index 9187ef3bb8..8b1f6ca303 100644 --- a/docs/graphite.md +++ b/docs/graphite.md @@ -17,3 +17,4 @@ averageSeries(seriesLists) series | avg | Stable consolidateBy(seriesList, func) seriesList | | Stable movingAverage(seriesLists, windowSize) seriesList | | Unstable sumSeries(seriesLists) series | sum | Stable +transformNull(seriesList, default=0) seriesList | | Stable diff --git a/expr/LICENSE b/expr/LICENSE new file mode 100644 index 0000000000..340efde788 --- /dev/null +++ b/expr/LICENSE @@ -0,0 +1,26 @@ +Regarding type expr, much of parse.go, parse_test.go: + +Copyright (c) 2017 Grafana Labs +Copyright (c) 2014,2015 Damian Gryski +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/expr/NOTES b/expr/NOTES index 4a9d71ae22..ba6a8d3d86 100644 --- a/expr/NOTES +++ b/expr/NOTES @@ -1,4 +1,23 @@ -regarding when to allocate models.Series (or more precisely, their []schema.Point attribute), there's 2 main choices: +## future code quality improvements + +The code is a bit ugly in some places, there's a few items I'm not happy with and hope to change down the road, ideally after building out the feature more and getting a better understanding of requirements. In particular: + +* do all validation in planner, nothing should be needed in individual functions Init invocations + though often validation and storing a value are intertwined. should ideally feed function post-validation value (e.g. movingAverage windowsize) +* in func Exec, uniform access to optional args, whether specified via kwarg or position. +* Init and NeedRange could be 1 function, though not sure if they should +* we instantiate and init functions twice (during creation of plan and running it). could do this only once if ... +function invocations could be attached to expressions (when creating the plan). 
when executing them, we then +also don't need to pass args anymore (which is currently redundant since they already got them via Init) +* in general, while constructing the plan, we should probably build a different datastructure more tailored towards execution, currently we + stick to the parse-time expr types in the planner and these concepts also make their way into the function implementations, which gets a bit dirty at times. + +movingAverage accepts arg as both int or string, how to handle properly? + + +## regarding when to allocate models.Series (or more precisely, their []schema.Point attribute) + +there's 2 main choices: 1) copy-on-write: - each function does not modify data in their inputs, they allocate new slices (or better: get from pool) etc to save output computations @@ -29,3 +48,6 @@ note that individual processing functions only request slices from the pool, the e.g. an avg of 3 series will create 1 new series (from pool), but won't put the 3 inputs back in the pool, because another processing step may require the same input data. +function implementations: +* must not modify existing slices +* should use the pool to get new slices in which to store their new/modified data. and add the new slice into the cache so it can later be cleaned diff --git a/expr/data_test.go b/expr/data_test.go index 2cfa876021..697c4b27fc 100644 --- a/expr/data_test.go +++ b/expr/data_test.go @@ -33,6 +33,16 @@ var c = []schema.Point{ {Val: 4, Ts: 60}, } +// emulate an 8 bit counter +var d = []schema.Point{ + {Val: 0, Ts: 10}, + {Val: 33, Ts: 20}, + {Val: 199, Ts: 30}, + {Val: 29, Ts: 40}, // overflowed + {Val: 80, Ts: 50}, + {Val: 250, Ts: 60}, +} + var sumab = []schema.Point{ {Val: 0, Ts: 10}, {Val: math.MaxFloat64, Ts: 20}, diff --git a/expr/expr.go b/expr/expr.go index 316c07de56..14dcdfe691 100644 --- a/expr/expr.go +++ b/expr/expr.go @@ -10,17 +10,21 @@ type exprType int // the following types let the parser express the type it parsed from the input targets const ( - etName exprType = iota // a string without quotes, e.g. metric.name, metric.*.query.patt* or special values like True or None which some functions expect + etName exprType = iota // a string without quotes, e.g. 
metric.name, metric.*.query.patt* or special values like None which some functions expect + etBool // True or False etFunc // a function call like movingAverage(foo, bar) - etConst // any number, parsed as a float64 value + etInt // any number with no decimal numbers, parsed as a float64 value + etFloat // any number with decimals, parsed as a float64 value etString // anything that was between '' or "" ) // expr represents a parsed expression type expr struct { etype exprType - float float64 // for etConst - str string // for etName, etFunc (func name), etString and etConst (unparsed input value) + float float64 // for etFloat + int int64 // for etInt + str string // for etName, etFunc (func name), etString, etBool, etInt and etFloat (unparsed input value) + bool bool // for etBool args []*expr // for etFunc: positional args which itself are expressions namedArgs map[string]*expr // for etFunc: named args which itself are expressions argsStr string // for etFunc: literal string of how all the args were specified @@ -40,10 +44,199 @@ func (e expr) Print(indent int) string { args += strings.Repeat(" ", indent+2) + k + "=" + v.Print(0) + ",\n" } return fmt.Sprintf("%sexpr-func %s(\n%s%s)", space, e.str, args, space) - case etConst: - return fmt.Sprintf("%sexpr-const %v", space, e.float) + case etFloat: + return fmt.Sprintf("%sexpr-float %v", space, e.float) + case etInt: + return fmt.Sprintf("%sexpr-int %v", space, e.int) case etString: return fmt.Sprintf("%sexpr-string %q", space, e.str) } return "HUH-SHOULD-NEVER-HAPPEN" } + +// consumeBasicArg verifies that the argument at given pos matches the expected arg +// it's up to the caller to assure that given pos is valid before calling. +// if arg allows for multiple arguments, pos is advanced to cover all accepted arguments. +// if the arg is a "basic" arg (meaning not a series, seriesList or seriesLists) the +// appropriate value(s) will be assigned to exp.val +// for non-basic args, see consumeSeriesArg which should be called after deducing the required from/to. +// the returned pos is always the index where the next argument should be. +func (e expr) consumeBasicArg(pos int, exp Arg) (int, error) { + got := e.args[pos] + switch v := exp.(type) { + case ArgSeries, ArgSeriesList: + if got.etype != etName && got.etype != etFunc { + return 0, ErrBadArgumentStr{"func or name", string(got.etype)} + } + case ArgSeriesLists: + if got.etype != etName && got.etype != etFunc { + return 0, ErrBadArgumentStr{"func or name", string(got.etype)} + } + // special case! consume all subsequent args (if any) in args that will also yield a seriesList + for len(e.args) > pos+1 && (e.args[pos+1].etype == etName || e.args[pos+1].etype == etFunc) { + pos += 1 + } + case ArgInt: + if got.etype != etInt { + return 0, ErrBadArgumentStr{"int", string(got.etype)} + } + for _, va := range v.validator { + if err := va(got); err != nil { + return 0, fmt.Errorf("%s: %s", v.key, err.Error()) + } + } + *v.val = got.int + case ArgInts: + if got.etype != etInt { + return 0, ErrBadArgumentStr{"int", string(got.etype)} + } + *v.val = append(*v.val, got.int) + // special case! 
consume all subsequent args (if any) in args that will also yield an integer
+ for len(e.args) > pos+1 && e.args[pos+1].etype == etInt {
+ pos += 1
+ for _, va := range v.validator {
+ if err := va(e.args[pos]); err != nil {
+ return 0, fmt.Errorf("%s: %s", v.key, err.Error())
+ }
+ }
+ *v.val = append(*v.val, e.args[pos].int)
+ }
+ case ArgFloat:
+ // integer is also a valid float, just happened to have no decimals
+ if got.etype != etFloat && got.etype != etInt {
+ return 0, ErrBadArgumentStr{"float", string(got.etype)}
+ }
+ for _, va := range v.validator {
+ if err := va(got); err != nil {
+ return 0, fmt.Errorf("%s: %s", v.key, err.Error())
+ }
+ }
+ if got.etype == etInt {
+ *v.val = float64(got.int)
+ } else {
+ *v.val = got.float
+ }
+ case ArgString:
+ if got.etype != etString {
+ return 0, ErrBadArgumentStr{"string", string(got.etype)}
+ }
+ for _, va := range v.validator {
+ if err := va(got); err != nil {
+ return 0, fmt.Errorf("%s: %s", v.key, err.Error())
+ }
+ }
+ *v.val = got.str
+ case ArgBool:
+ if got.etype != etBool {
+ return 0, ErrBadArgumentStr{"bool", string(got.etype)}
+ }
+ *v.val = got.bool
+ default:
+ return 0, fmt.Errorf("unsupported type %T for consumeBasicArg", exp)
+ }
+ pos += 1
+ return pos, nil
+}
+
+// consumeSeriesArg verifies that the argument at given pos matches the expected arg
+// it's up to the caller to assure that given pos is valid before calling.
+// if arg allows for multiple arguments, pos is advanced to cover all accepted arguments.
+// if the arg is a "basic" arg, no value is saved (it's up to consumeBasicArg to do that)
+// but for non-basic args (meaning a series, seriesList or seriesLists) the
+// appropriate value(s) will be assigned to exp.val
+// the returned pos is always the index where the next argument should be.
+func (e expr) consumeSeriesArg(pos int, exp Arg, from, to uint32, stable bool, reqs []Req) (int, []Req, error) {
+ got := e.args[pos]
+ var err error
+ var fn GraphiteFunc
+ switch v := exp.(type) {
+ case ArgSeries:
+ if got.etype != etName && got.etype != etFunc {
+ return 0, nil, ErrBadArgumentStr{"func or name", string(got.etype)}
+ }
+ fn, reqs, err = newplan(got, from, to, stable, reqs)
+ if err != nil {
+ return 0, nil, err
+ }
+ *v.val = fn
+ case ArgSeriesList:
+ if got.etype != etName && got.etype != etFunc {
+ return 0, nil, ErrBadArgumentStr{"func or name", string(got.etype)}
+ }
+ fn, reqs, err = newplan(got, from, to, stable, reqs)
+ if err != nil {
+ return 0, nil, err
+ }
+ *v.val = fn
+ case ArgSeriesLists:
+ if got.etype != etName && got.etype != etFunc {
+ return 0, nil, ErrBadArgumentStr{"func or name", string(got.etype)}
+ }
+ fn, reqs, err = newplan(got, from, to, stable, reqs)
+ if err != nil {
+ return 0, nil, err
+ }
+ *v.val = append(*v.val, fn)
+ // special case!
consume all subsequent args (if any) in args that will also yield a seriesList + for len(e.args) > pos+1 && (e.args[pos+1].etype == etName || e.args[pos+1].etype == etFunc) { + pos += 1 + fn, reqs, err = newplan(e.args[pos], from, to, stable, reqs) + if err != nil { + return 0, nil, err + } + *v.val = append(*v.val, fn) + } + default: + return 0, nil, fmt.Errorf("unsupported type %T for consumeSeriesArg", exp) + } + pos += 1 + return pos, reqs, nil +} + +// consumeKwarg consumes the kwarg (by key k) and verifies it +// if the specified argument is valid, it is saved in exp.val +// where exp is the arg specified by the function that has the given key +func (e expr) consumeKwarg(key string, optArgs []Arg) error { + var found bool + var exp Arg + for _, exp = range optArgs { + if exp.Key() == key { + found = true + break + } + } + if !found { + return ErrUnknownKwarg{key} + } + got := e.namedArgs[key] + switch v := exp.(type) { + case ArgInt: + if got.etype != etInt { + return ErrBadKwarg{key, exp, got.etype} + } + *v.val = got.int + case ArgFloat: + switch got.etype { + case etInt: + // integer is also a valid float, just happened to have no decimals + *v.val = float64(got.int) + case etFloat: + *v.val = got.float + default: + return ErrBadKwarg{key, exp, got.etype} + } + case ArgString: + if got.etype != etString { + return ErrBadKwarg{key, exp, got.etype} + } + *v.val = got.str + case ArgBool: + if got.etype != etBool { + return ErrBadKwarg{key, exp, got.etype} + } + *v.val = got.bool + default: + return fmt.Errorf("unsupported type %T for consumeKwarg", exp) + } + return nil +} diff --git a/expr/exprtype_string.go b/expr/exprtype_string.go new file mode 100644 index 0000000000..c5798892d3 --- /dev/null +++ b/expr/exprtype_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=exprType"; DO NOT EDIT. 
+ +package expr + +import "fmt" + +const _exprType_name = "etNameetBooletFuncetIntetFloatetString" + +var _exprType_index = [...]uint8{0, 6, 12, 18, 23, 30, 38} + +func (i exprType) String() string { + if i < 0 || i >= exprType(len(_exprType_index)-1) { + return fmt.Sprintf("exprType(%d)", i) + } + return _exprType_name[_exprType_index[i]:_exprType_index[i+1]] +} diff --git a/expr/func_alias.go b/expr/func_alias.go index b2905eccdd..1dff6777e7 100644 --- a/expr/func_alias.go +++ b/expr/func_alias.go @@ -1,39 +1,36 @@ package expr import ( - "reflect" - "github.com/raintank/metrictank/api/models" ) type FuncAlias struct { + in GraphiteFunc + alias string } -func NewAlias() Func { - return FuncAlias{} -} - -func (s FuncAlias) Signature() ([]argType, []argType) { - return []argType{seriesList, str}, []argType{seriesList} +func NewAlias() GraphiteFunc { + return &FuncAlias{} } -func (s FuncAlias) Init(args []*expr) error { - return nil +func (s *FuncAlias) Signature() ([]Arg, []Arg) { + return []Arg{ + ArgSeriesList{val: &s.in}, + ArgString{val: &s.alias}, + }, []Arg{ArgSeriesList{}} } -func (s FuncAlias) NeedRange(from, to uint32) (uint32, uint32) { +func (s *FuncAlias) NeedRange(from, to uint32) (uint32, uint32) { return from, to } -func (s FuncAlias) Exec(cache map[Req][]models.Series, in ...interface{}) ([]interface{}, error) { - series, ok := in[0].([]models.Series) - if !ok { - return nil, ErrBadArgument{reflect.TypeOf([]models.Series{}), reflect.TypeOf(in[0])} +func (s *FuncAlias) Exec(cache map[Req][]models.Series) ([]models.Series, error) { + series, err := s.in.Exec(cache) + if err != nil { + return nil, err } - var out []interface{} - for _, serie := range series { - serie.Target = in[1].(string) - out = append(out, serie) + for i := range series { + series[i].Target = s.alias } - return out, nil + return series, nil } diff --git a/expr/func_alias_test.go b/expr/func_alias_test.go index 98adb7d05e..21586d6bcf 100644 --- a/expr/func_alias_test.go +++ b/expr/func_alias_test.go @@ -74,7 +74,10 @@ func TestAliasMultiple(t *testing.T) { func testAlias(name string, in []models.Series, out []models.Series, t *testing.T) { f := NewAlias() - got, err := f.Exec(make(map[Req][]models.Series), interface{}(in), interface{}("bar")) + alias := f.(*FuncAlias) + alias.alias = "bar" + alias.in = NewMock(in) + got, err := f.Exec(make(map[Req][]models.Series)) if err != nil { t.Fatalf("case %q: err should be nil. got %q", name, err) } @@ -82,10 +85,7 @@ func testAlias(name string, in []models.Series, out []models.Series, t *testing. 
t.Fatalf("case %q: alias output should be same amount of series as input: %d, not %d", name, len(in), len(got)) } for i, o := range out { - g, ok := got[i].(models.Series) - if !ok { - t.Fatalf("case %q: expected alias output of models.Series type", name) - } + g := got[i] if o.Target != g.Target { t.Fatalf("case %q: expected target %q, got %q", name, o.Target, g.Target) } @@ -126,7 +126,10 @@ func benchmarkAlias(b *testing.B, numSeries int) { b.ResetTimer() for i := 0; i < b.N; i++ { f := NewAlias() - got, err := f.Exec(make(map[Req][]models.Series), interface{}(input), "new-name") + alias := f.(*FuncAlias) + alias.alias = "new-name" + alias.in = NewMock(input) + got, err := f.Exec(make(map[Req][]models.Series)) if err != nil { b.Fatalf("%s", err) } diff --git a/expr/func_aliasbynode.go b/expr/func_aliasbynode.go new file mode 100644 index 0000000000..18056322a5 --- /dev/null +++ b/expr/func_aliasbynode.go @@ -0,0 +1,51 @@ +package expr + +import ( + "strings" + + "github.com/raintank/metrictank/api/models" +) + +type FuncAliasByNode struct { + in GraphiteFunc + nodes []int64 +} + +func NewAliasByNode() GraphiteFunc { + return &FuncAliasByNode{} +} + +func (s *FuncAliasByNode) Signature() ([]Arg, []Arg) { + return []Arg{ + ArgSeriesList{val: &s.in}, + ArgInts{val: &s.nodes}, + }, []Arg{ArgSeries{}} +} + +func (s *FuncAliasByNode) NeedRange(from, to uint32) (uint32, uint32) { + return from, to +} + +func (s *FuncAliasByNode) Exec(cache map[Req][]models.Series) ([]models.Series, error) { + series, err := s.in.Exec(cache) + if err != nil { + return nil, err + } + for i, serie := range series { + metric := extractMetric(serie.Target) + parts := strings.Split(metric, ".") + var name []string + for _, n64 := range s.nodes { + n := int(n64) + if n < 0 { + n += len(parts) + } + if n >= len(parts) || n < 0 { + continue + } + name = append(name, parts[n]) + } + series[i].Target = strings.Join(name, ".") + } + return series, nil +} diff --git a/expr/func_avgseries.go b/expr/func_avgseries.go index 2fc9fd5ffc..287fee3ca7 100644 --- a/expr/func_avgseries.go +++ b/expr/func_avgseries.go @@ -3,44 +3,41 @@ package expr import ( "fmt" "math" - "reflect" "github.com/raintank/metrictank/api/models" "gopkg.in/raintank/schema.v1" ) type FuncAvgSeries struct { + in []GraphiteFunc } -func NewAvgSeries() Func { - return FuncAvgSeries{} +func NewAvgSeries() GraphiteFunc { + return &FuncAvgSeries{} } -func (s FuncAvgSeries) Signature() ([]argType, []argType) { - return []argType{seriesLists}, []argType{series} +func (s *FuncAvgSeries) Signature() ([]Arg, []Arg) { + return []Arg{ + ArgSeriesLists{val: &s.in}, + }, []Arg{ArgSeries{}} } -func (s FuncAvgSeries) Init(args []*expr) error { - return nil -} - -func (s FuncAvgSeries) NeedRange(from, to uint32) (uint32, uint32) { +func (s *FuncAvgSeries) NeedRange(from, to uint32) (uint32, uint32) { return from, to } -func (s FuncAvgSeries) Exec(cache map[Req][]models.Series, inputs ...interface{}) ([]interface{}, error) { +func (s *FuncAvgSeries) Exec(cache map[Req][]models.Series) ([]models.Series, error) { var series []models.Series - for _, input := range inputs { - seriesList, ok := input.([]models.Series) - if !ok { - return nil, ErrBadArgument{reflect.TypeOf([]models.Series{}), reflect.TypeOf(input)} + for i := range s.in { + in, err := s.in[i].Exec(cache) + if err != nil { + return nil, err } - series = append(series, seriesList...) - + series = append(series, in...) 
} if len(series) == 1 { series[0].Target = fmt.Sprintf("averageSeries(%s)", series[0].QueryPatt) - return []interface{}{series[0]}, nil + return series, nil } out := pointSlicePool.Get().([]schema.Point) for i := 0; i < len(series[0].Datapoints); i++ { @@ -70,5 +67,5 @@ func (s FuncAvgSeries) Exec(cache map[Req][]models.Series, inputs ...interface{} } cache[Req{}] = append(cache[Req{}], output) - return []interface{}{output}, nil + return []models.Series{output}, nil } diff --git a/expr/func_avgseries_test.go b/expr/func_avgseries_test.go index e53eb2e1a0..cde2907164 100644 --- a/expr/func_avgseries_test.go +++ b/expr/func_avgseries_test.go @@ -99,21 +99,18 @@ func TestAvgSeriesMultipleDiffQuery(t *testing.T) { func testAvgSeries(name string, in [][]models.Series, out models.Series, t *testing.T) { f := NewAvgSeries() - var input []interface{} + avg := f.(*FuncAvgSeries) for _, i := range in { - input = append(input, i) + avg.in = append(avg.in, NewMock(i)) } - got, err := f.Exec(make(map[Req][]models.Series), input...) + got, err := f.Exec(make(map[Req][]models.Series)) if err != nil { t.Fatalf("case %q: err should be nil. got %q", name, err) } if len(got) != 1 { t.Fatalf("case %q: avgSeries output should be only 1 thing (a series) not %d", name, len(got)) } - g, ok := got[0].(models.Series) - if !ok { - t.Fatalf("case %q: expected avg output of models.Series type", name) - } + g := got[0] if g.Target != out.Target { t.Fatalf("case %q: expected target %q, got %q", name, out.Target, g.Target) } @@ -182,13 +179,15 @@ func benchmarkAvgSeries(b *testing.B, numSeries int, fn0, fn1 func() []schema.Po input = append(input, series) } b.ResetTimer() + var err error for i := 0; i < b.N; i++ { f := NewAvgSeries() - got, err := f.Exec(make(map[Req][]models.Series), interface{}(input)) + avg := f.(*FuncAvgSeries) + avg.in = append(avg.in, NewMock(input)) + results, err = f.Exec(make(map[Req][]models.Series)) if err != nil { b.Fatalf("%s", err) } - results = got } - b.SetBytes(int64(numSeries * len(input[0].Datapoints) * 12)) + b.SetBytes(int64(numSeries * len(results[0].Datapoints) * 12)) } diff --git a/expr/func_consolidateby.go b/expr/func_consolidateby.go index 79ac7eecf5..1417e70479 100644 --- a/expr/func_consolidateby.go +++ b/expr/func_consolidateby.go @@ -2,40 +2,42 @@ package expr import ( "fmt" - "reflect" "github.com/raintank/metrictank/api/models" "github.com/raintank/metrictank/consolidation" ) type FuncConsolidateBy struct { + in GraphiteFunc + by string } -func NewConsolidateBy() Func { - return FuncConsolidateBy{} +func NewConsolidateBy() GraphiteFunc { + return &FuncConsolidateBy{} } -func (s FuncConsolidateBy) Signature() ([]argType, []argType) { - return []argType{seriesList, str}, []argType{seriesList} -} - -func (s FuncConsolidateBy) Init(args []*expr) error { - return consolidation.Validate(args[1].str) +func (s *FuncConsolidateBy) Signature() ([]Arg, []Arg) { + validConsol := func(e *expr) error { + return consolidation.Validate(e.str) + } + return []Arg{ + ArgSeriesList{val: &s.in}, + ArgString{val: &s.by, validator: []Validator{validConsol}}, + }, []Arg{ArgSeriesList{}} } -func (s FuncConsolidateBy) NeedRange(from, to uint32) (uint32, uint32) { +func (s *FuncConsolidateBy) NeedRange(from, to uint32) (uint32, uint32) { return from, to } -func (s FuncConsolidateBy) Exec(cache map[Req][]models.Series, inputs ...interface{}) ([]interface{}, error) { - var out []interface{} - input := inputs[0] - seriesList, ok := input.([]models.Series) - if !ok { - return nil, 
ErrBadArgument{reflect.TypeOf([]models.Series{}), reflect.TypeOf(input)} +func (s *FuncConsolidateBy) Exec(cache map[Req][]models.Series) ([]models.Series, error) { + series, err := s.in.Exec(cache) + if err != nil { + return nil, err } - for _, series := range seriesList { - series.Target = fmt.Sprintf("consolidateBy(%s,\"%s\")", series.Target, inputs[1].(string)) + var out []models.Series + for _, series := range series { + series.Target = fmt.Sprintf("consolidateBy(%s,\"%s\")", series.Target, s.by) out = append(out, series) } return out, nil diff --git a/expr/func_get.go b/expr/func_get.go new file mode 100644 index 0000000000..f8e1d53b04 --- /dev/null +++ b/expr/func_get.go @@ -0,0 +1,24 @@ +package expr + +import "github.com/raintank/metrictank/api/models" + +// internal function just for getting data +type FuncGet struct { + req Req +} + +func NewGet(req Req) GraphiteFunc { + return FuncGet{req} +} + +func (s FuncGet) Signature() ([]Arg, []Arg) { + return nil, []Arg{ArgSeries{}} +} + +func (s FuncGet) NeedRange(from, to uint32) (uint32, uint32) { + return from, to +} + +func (s FuncGet) Exec(cache map[Req][]models.Series) ([]models.Series, error) { + return cache[s.req], nil +} diff --git a/expr/func_mock_test.go b/expr/func_mock_test.go new file mode 100644 index 0000000000..d62d9980f3 --- /dev/null +++ b/expr/func_mock_test.go @@ -0,0 +1,24 @@ +package expr + +import "github.com/raintank/metrictank/api/models" + +// internal function just for getting data +type FuncMock struct { + data []models.Series +} + +func NewMock(data []models.Series) GraphiteFunc { + return FuncMock{data} +} + +func (s FuncMock) Signature() ([]Arg, []Arg) { + return nil, []Arg{ArgSeries{}} +} + +func (s FuncMock) NeedRange(from, to uint32) (uint32, uint32) { + return from, to +} + +func (s FuncMock) Exec(cache map[Req][]models.Series) ([]models.Series, error) { + return s.data, nil +} diff --git a/expr/func_movingaverage.go b/expr/func_movingaverage.go index 7eaac4bd6f..f70421a251 100644 --- a/expr/func_movingaverage.go +++ b/expr/func_movingaverage.go @@ -1,49 +1,40 @@ package expr import ( - "strconv" - - "github.com/raintank/dur" "github.com/raintank/metrictank/api/models" ) type FuncMovingAverage struct { - window uint32 + window int64 + in GraphiteFunc } -func NewMovingAverage() Func { +func NewMovingAverage() GraphiteFunc { return &FuncMovingAverage{} } // note if input is 1 series, then output is too. not sure how to communicate that -func (s *FuncMovingAverage) Signature() ([]argType, []argType) { - return []argType{seriesList, str}, []argType{seriesList} -} - -func (s *FuncMovingAverage) Init(args []*expr) error { - if args[1].etype == etConst { - points, err := strconv.Atoi(args[1].str) - // TODO this is not correct. what really needs to happen here is figure out the interval of the data we will consume +func (s *FuncMovingAverage) Signature() ([]Arg, []Arg) { + return []Arg{ + ArgSeriesList{val: &s.in}, + // this could be an int OR a string. + // we need to figure out the interval of the data we will consume // and request from -= interval * points // interestingly the from adjustment might mean the archive TTL is no longer sufficient and push the request into a different rollup archive, which we should probably // account for. let's solve all of this later. 
- s.window = uint32(points) - return err - } else { - if args[1].etype != etString { - panic("internal error: MovingAverage cannot parse windowSize, should already have been validated") - } - window, err := dur.ParseUsec(args[1].str) - s.window = window - return err - } + ArgInt{val: &s.window}, + }, []Arg{ArgSeriesList{}} } func (s *FuncMovingAverage) NeedRange(from, to uint32) (uint32, uint32) { - return from - s.window, to + return from - uint32(s.window), to } -func (s *FuncMovingAverage) Exec(cache map[Req][]models.Series, in ...interface{}) ([]interface{}, error) { +func (s *FuncMovingAverage) Exec(cache map[Req][]models.Series) ([]models.Series, error) { + series, err := s.in.Exec(cache) + if err != nil { + return nil, err + } //cache[Req{}] = append(cache[Req{}], out) - return nil, nil + return series, nil } diff --git a/expr/func_persecond.go b/expr/func_persecond.go new file mode 100644 index 0000000000..a216b1fe33 --- /dev/null +++ b/expr/func_persecond.go @@ -0,0 +1,73 @@ +package expr + +import ( + "fmt" + "math" + + "github.com/raintank/metrictank/api/models" + "gopkg.in/raintank/schema.v1" +) + +type FuncPerSecond struct { + in []GraphiteFunc + maxValue int64 +} + +func NewPerSecond() GraphiteFunc { + return &FuncPerSecond{} +} + +func (s *FuncPerSecond) Signature() ([]Arg, []Arg) { + return []Arg{ + ArgSeriesLists{val: &s.in}, + ArgInt{key: "maxValue", opt: true, validator: []Validator{IntPositive}, val: &s.maxValue}, + }, []Arg{ + ArgSeriesList{}, + } +} + +func (s *FuncPerSecond) NeedRange(from, to uint32) (uint32, uint32) { + return from, to +} + +func (s *FuncPerSecond) Exec(cache map[Req][]models.Series) ([]models.Series, error) { + var series []models.Series + for i := range s.in { + serie, err := s.in[i].Exec(cache) + if err != nil { + return nil, err + } + series = append(series, serie...) + } + maxValue := math.NaN() + if s.maxValue > 0 { + maxValue = float64(s.maxValue) + } + var outputs []models.Series + for _, serie := range series { + out := pointSlicePool.Get().([]schema.Point) + for i, v := range serie.Datapoints { + out = append(out, schema.Point{Ts: v.Ts}) + if i == 0 || math.IsNaN(v.Val) || math.IsNaN(serie.Datapoints[i-1].Val) { + out[i].Val = math.NaN() + continue + } + diff := v.Val - serie.Datapoints[i-1].Val + if diff >= 0 { + out[i].Val = diff / float64(serie.Interval) + } else if !math.IsNaN(maxValue) && maxValue >= v.Val { + out[i].Val = (maxValue + diff + 1) / float64(serie.Interval) + } else { + out[i].Val = math.NaN() + } + } + s := models.Series{ + Target: fmt.Sprintf("perSecond(%s)", serie.Target), + Datapoints: out, + Interval: serie.Interval, + } + outputs = append(outputs, s) + cache[Req{}] = append(cache[Req{}], s) + } + return outputs, nil +} diff --git a/expr/func_persecond_test.go b/expr/func_persecond_test.go new file mode 100644 index 0000000000..1c83d144fe --- /dev/null +++ b/expr/func_persecond_test.go @@ -0,0 +1,199 @@ +package expr + +import ( + "math" + "testing" + + "github.com/raintank/metrictank/api/models" + "gopkg.in/raintank/schema.v1" +) + +var aPerSecond = []schema.Point{ + {Val: math.NaN(), Ts: 10}, // nan to 0 + {Val: 0, Ts: 20}, // 0 to 0 + {Val: 0.55, Ts: 30}, // 0 to 5.5 + {Val: math.NaN(), Ts: 40}, // 5.5 to nan + {Val: math.NaN(), Ts: 50}, // nan to nan + {Val: math.NaN(), Ts: 60}, // nan to 1234567890 +} + +var bPerSecond = []schema.Point{ + {Val: math.NaN(), Ts: 10}, // nan to 0 + {Val: math.MaxFloat64 / 10, Ts: 20}, // 0 to maxFloat + {Val: 0, Ts: 30}, // maxFloat to maxFloat -20. 
really NaN but floating point limitation -> 0 + {Val: math.NaN(), Ts: 40}, // maxFloat -20 to nan + {Val: math.NaN(), Ts: 50}, // nan to 1234567890 + {Val: math.NaN(), Ts: 60}, // 1234567890 to nan +} + +var cPerSecond = []schema.Point{ + {Val: math.NaN(), Ts: 10}, // nan to 0 + {Val: 0, Ts: 20}, // 0 to 0 + {Val: 0.1, Ts: 30}, // 0 to 1 + {Val: 0.1, Ts: 40}, // 1 to 2 + {Val: 0.1, Ts: 50}, // 2 to 3 + {Val: 0.1, Ts: 60}, // 3 to 4 +} + +var dPerSecondMax255 = []schema.Point{ + {Val: math.NaN(), Ts: 10}, // nan to 0 + {Val: 3.3, Ts: 20}, // 0 to 33 + {Val: float64(199-33) / 10, Ts: 30}, // 33 to 199 + {Val: float64(29-199+256) / 10, Ts: 40}, // 199 to 29, overflowed after 255 + {Val: float64(80-29) / 10, Ts: 50}, // 29 to 80 + {Val: float64(250-80) / 10, Ts: 60}, // 80 to 250 +} + +func TestPerSecondSingle(t *testing.T) { + testPerSecond( + "identity", + [][]models.Series{ + { + { + Interval: 10, + Target: "a", + Datapoints: getCopy(a), + }, + }, + }, + []models.Series{ + { + Interval: 10, + Target: "perSecond(a)", + Datapoints: getCopy(aPerSecond), + }, + }, + 0, + t, + ) +} + +func TestPerSecondSingleMaxValue(t *testing.T) { + testPerSecond( + "identity-counter8bit", + [][]models.Series{ + { + { + Interval: 10, + Target: "counter8bit", + Datapoints: getCopy(d), + }, + }, + }, + []models.Series{ + { + Interval: 10, + Target: "perSecond(counter8bit)", + Datapoints: getCopy(dPerSecondMax255), + }, + }, + 255, + t, + ) +} + +func TestPerSecondMulti(t *testing.T) { + testPerSecond( + "multiple-series", + [][]models.Series{ + { + { + Interval: 10, + Target: "a", + Datapoints: getCopy(a), + }, + { + Interval: 10, + Target: "b.*", + Datapoints: getCopy(b), + }, + }, + }, + []models.Series{ + { + Target: "perSecond(a)", + Datapoints: getCopy(aPerSecond), + }, + { + Target: "perSecond(b.*)", + Datapoints: getCopy(bPerSecond), + }, + }, + 0, + t, + ) +} +func TestPerSecondMultiMulti(t *testing.T) { + testPerSecond( + "multiple-serieslists", + [][]models.Series{ + { + { + Interval: 10, + Target: "a", + Datapoints: getCopy(a), + }, + { + Interval: 10, + Target: "b.foo{bar,baz}", + Datapoints: getCopy(b), + }, + }, + { + { + Interval: 10, + Target: "movingAverage(bar, '1min')", + Datapoints: getCopy(c), + }, + }, + }, + []models.Series{ + { + Target: "perSecond(a)", + Datapoints: getCopy(aPerSecond), + }, + { + Target: "perSecond(b.foo{bar,baz})", + Datapoints: getCopy(bPerSecond), + }, + { + Target: "perSecond(movingAverage(bar, '1min'))", + Datapoints: getCopy(cPerSecond), + }, + }, + 0, + t, + ) +} + +func testPerSecond(name string, in [][]models.Series, out []models.Series, max int64, t *testing.T) { + f := NewPerSecond() + ps := f.(*FuncPerSecond) + for i := range in { + ps.in = append(ps.in, NewMock(in[i])) + ps.maxValue = max + } + gots, err := f.Exec(make(map[Req][]models.Series)) + if err != nil { + t.Fatalf("case %q: err should be nil. 
got %q", name, err) + } + if len(gots) != len(out) { + t.Fatalf("case %q: perSecond len output expected %d, got %d", name, len(out), len(gots)) + } + for i, g := range gots { + exp := out[i] + if g.Target != exp.Target { + t.Fatalf("case %q: expected target %q, got %q", name, exp.Target, g.Target) + } + if len(g.Datapoints) != len(exp.Datapoints) { + t.Fatalf("case %q: len output expected %d, got %d", name, len(exp.Datapoints), len(g.Datapoints)) + } + for j, p := range g.Datapoints { + bothNaN := math.IsNaN(p.Val) && math.IsNaN(exp.Datapoints[j].Val) + if (bothNaN || p.Val == exp.Datapoints[j].Val) && p.Ts == exp.Datapoints[j].Ts { + continue + } + t.Fatalf("case %q: output point %d - expected %v got %v", name, j, exp.Datapoints[j], p) + } + } +} diff --git a/expr/func_smartsummarize.go b/expr/func_smartsummarize.go new file mode 100644 index 0000000000..c715b9bb41 --- /dev/null +++ b/expr/func_smartsummarize.go @@ -0,0 +1,32 @@ +package expr + +import "github.com/raintank/metrictank/api/models" + +type FuncSmartSummarize struct { + in GraphiteFunc + interval string + fn string + alignToFrom bool +} + +func NewSmartSummarize() GraphiteFunc { + return &FuncSmartSummarize{fn: "sum"} +} + +func (s *FuncSmartSummarize) Signature() ([]Arg, []Arg) { + return []Arg{ + ArgSeriesList{val: &s.in}, + ArgString{key: "interval", val: &s.interval}, + ArgString{key: "func", opt: true, val: &s.fn}, + ArgBool{key: "alignToFrom", opt: true, val: &s.alignToFrom}, + }, []Arg{ArgSeries{}} +} + +func (s *FuncSmartSummarize) NeedRange(from, to uint32) (uint32, uint32) { + return from, to +} + +func (s *FuncSmartSummarize) Exec(cache map[Req][]models.Series) ([]models.Series, error) { + series, err := s.in.Exec(cache) + return series, err +} diff --git a/expr/func_sumseries.go b/expr/func_sumseries.go index 761d6ad52a..84d6ec4b57 100644 --- a/expr/func_sumseries.go +++ b/expr/func_sumseries.go @@ -3,44 +3,42 @@ package expr import ( "fmt" "math" - "reflect" "github.com/raintank/metrictank/api/models" "gopkg.in/raintank/schema.v1" ) type FuncSumSeries struct { + in []GraphiteFunc } -func NewSumSeries() Func { - return FuncSumSeries{} +func NewSumSeries() GraphiteFunc { + return &FuncSumSeries{} } -func (s FuncSumSeries) Signature() ([]argType, []argType) { - return []argType{seriesLists}, []argType{series} +func (s *FuncSumSeries) Signature() ([]Arg, []Arg) { + return []Arg{ + ArgSeriesLists{val: &s.in}, + }, []Arg{ArgSeries{}} } -func (s FuncSumSeries) Init(args []*expr) error { - return nil -} - -func (s FuncSumSeries) NeedRange(from, to uint32) (uint32, uint32) { +func (s *FuncSumSeries) NeedRange(from, to uint32) (uint32, uint32) { return from, to } -func (s FuncSumSeries) Exec(cache map[Req][]models.Series, inputs ...interface{}) ([]interface{}, error) { +func (s *FuncSumSeries) Exec(cache map[Req][]models.Series) ([]models.Series, error) { var series []models.Series - for _, input := range inputs { - seriesList, ok := input.([]models.Series) - if !ok { - return nil, ErrBadArgument{reflect.TypeOf([]models.Series{}), reflect.TypeOf(input)} + for i := range s.in { + in, err := s.in[i].Exec(cache) + if err != nil { + return nil, err } - series = append(series, seriesList...) - + series = append(series, in...) 
}
+
 if len(series) == 1 {
 series[0].Target = fmt.Sprintf("sumSeries(%s)", series[0].QueryPatt)
- return []interface{}{series[0]}, nil
+ return series, nil
 }
 out := pointSlicePool.Get().([]schema.Point)
 for i := 0; i < len(series[0].Datapoints); i++ {
@@ -66,5 +64,5 @@ func (s FuncSumSeries) Exec(cache map[Req][]models.Series, inputs ...interface{}
 Interval: series[0].Interval,
 }
 cache[Req{}] = append(cache[Req{}], output)
- return []interface{}{output}, nil
+ return []models.Series{output}, nil
 }
diff --git a/expr/func_sumseries_test.go b/expr/func_sumseries_test.go
index 494f1c7b8c..ea113831bf 100644
--- a/expr/func_sumseries_test.go
+++ b/expr/func_sumseries_test.go
@@ -99,21 +99,18 @@ func TestSumSeriesMultipleDiffQuery(t *testing.T) {
 func testSumSeries(name string, in [][]models.Series, out models.Series, t *testing.T) {
 f := NewSumSeries()
- var input []interface{}
+ sum := f.(*FuncSumSeries)
 for _, i := range in {
- input = append(input, i)
+ sum.in = append(sum.in, NewMock(i))
 }
- got, err := f.Exec(make(map[Req][]models.Series), input...)
+ got, err := f.Exec(make(map[Req][]models.Series))
 if err != nil {
 t.Fatalf("case %q: err should be nil. got %q", name, err)
 }
 if len(got) != 1 {
 t.Fatalf("case %q: sumSeries output should be only 1 thing (a series) not %d", name, len(got))
 }
- g, ok := got[0].(models.Series)
- if !ok {
- t.Fatalf("case %q: expected sum output of models.Series type", name)
- }
+ g := got[0]
 if g.Target != out.Target {
 t.Fatalf("case %q: expected target %q, got %q", name, out.Target, g.Target)
 }
@@ -184,7 +181,9 @@ func benchmarkSumSeries(b *testing.B, numSeries int, fn0, fn1 func() []schema.Po
 b.ResetTimer()
 for i := 0; i < b.N; i++ {
 f := NewSumSeries()
- got, err := f.Exec(make(map[Req][]models.Series), interface{}(input))
+ sum := f.(*FuncSumSeries)
+ sum.in = append(sum.in, NewMock(input))
+ got, err := f.Exec(make(map[Req][]models.Series))
 if err != nil {
 b.Fatalf("%s", err)
 }
diff --git a/expr/func_transformnull.go b/expr/func_transformnull.go
new file mode 100644
index 0000000000..ea6fc35717
--- /dev/null
+++ b/expr/func_transformnull.go
@@ -0,0 +1,65 @@
+package expr
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/raintank/metrictank/api/models"
+ "gopkg.in/raintank/schema.v1"
+)
+
+type FuncTransformNull struct {
+ in GraphiteFunc
+ def float64
+}
+
+func NewTransformNull() GraphiteFunc {
+ return &FuncTransformNull{nil, math.NaN()}
+}
+
+func (s *FuncTransformNull) Signature() ([]Arg, []Arg) {
+ return []Arg{
+ ArgSeriesList{val: &s.in},
+ ArgFloat{key: "default", opt: true, val: &s.def},
+ }, []Arg{ArgSeriesList{}}
+}
+
+func (s *FuncTransformNull) NeedRange(from, to uint32) (uint32, uint32) {
+ return from, to
+}
+
+func (s *FuncTransformNull) Exec(cache map[Req][]models.Series) ([]models.Series, error) {
+ series, err := s.in.Exec(cache)
+ if err != nil {
+ return nil, err
+ }
+ custom := true
+ if math.IsNaN(s.def) {
+ s.def = 0
+ custom = false
+ }
+
+ var out []models.Series
+ for _, serie := range series {
+ var target string
+ if custom {
+ target = fmt.Sprintf("transformNull(%s,%f)", serie.Target, s.def)
+ } else {
+ target = fmt.Sprintf("transformNull(%s)", serie.Target)
+ }
+ transformed := models.Series{
+ Target: target,
+ Datapoints: pointSlicePool.Get().([]schema.Point),
+ Interval: serie.Interval,
+ }
+ for _, p := range serie.Datapoints {
+ if math.IsNaN(p.Val) {
+ p.Val = s.def
+ }
+ transformed.Datapoints = append(transformed.Datapoints, p)
+ }
+ out = append(out, transformed)
+ cache[Req{}] = append(cache[Req{}], transformed)
+ }
+
return out, nil +} diff --git a/expr/funcs.go b/expr/funcs.go index e94bc2a041..344b3636de 100644 --- a/expr/funcs.go +++ b/expr/funcs.go @@ -2,39 +2,28 @@ package expr import "github.com/raintank/metrictank/api/models" -type argType uint8 - -// argument types. to let functions describe their inputs and outputs -// potentially not as strict as reality (e.g. movingAverage windowsize is categorized as a str) that's why we have the extra validation step -const ( - series argType = iota // a single series - seriesList // a list of series - seriesLists // one or multiple seriesLists - integer // number without decimals - float // number potentially with decimals - str // string -) - -type Func interface { - // Signature returns the list of argument types that must be provided as input, and those that will be returned - // NewPlan() will only create the plan of the expressions it parsed correspond to the signatures provided by the function - Signature() ([]argType, []argType) - // Init passes in the expressions parsed (they are pre-validated to correspond to the given signature via Signature() in terms of number and type) - // So that you can initialize internal state and perform deeper validation of arguments for functions that have specific requirements (e.g. a number argument that can only be even) - Init([]*expr) error - // NeedRange allows a func to express that to be able to return data in the given from to, it will need input data in the returned from-to window. +type GraphiteFunc interface { + // Signature declares input and output arguments (return values) + // input args can be optional in which case they can be specified positionally or via keys if you want to specify params that come after un-specified optional params + // the val pointers of each input Arg should point to a location accessible to the function, + // so that the planner can set up the inputs for your function based on user input. + // NewPlan() will only create the plan if the expressions it parsed correspond to the signatures provided by the function + Signature() ([]Arg, []Arg) + // NeedRange allows a func to express that to be able to return data in the given from-to, it will need input data in the returned from-to window. // (e.g. movingAverage of 5min needs data as of from-5min) + // this function will be called after validating and setting up all non-series and non-serieslist parameters. + // this way a function can convey the needed range by leveraging any relevant integer, string, bool, etc parameters. + // after this function is called, series and serieslist inputs will be set up. NeedRange(from, to uint32) (uint32, uint32) - // Exec executes the function with its arguments. - // it is passed in a map of all input data it may need as well as: - // etConst (number) -> float64 - // etString -> str - // etName/etFunc -> []models.Series or models.Series if the previous function returned a series - // supported return values: models.Series, []models.Series - Exec(map[Req][]models.Series, ...interface{}) ([]interface{}, error) + // Exec executes the function. the function should call any input functions, do its processing, and return output. 
+ // IMPORTANT: for performance and correctness, functions should
+ // * not modify slices of points that they get from their inputs
+ // * use the pool to get new slices in which to store any new/modified data
+ // * add the newly created slices into the cache so they can be reclaimed after the output is consumed
+ Exec(map[Req][]models.Series) ([]models.Series, error)
 }
-type funcConstructor func() Func
+type funcConstructor func() GraphiteFunc
 type funcDef struct {
 constr funcConstructor
@@ -44,13 +33,18 @@ var funcs map[string]funcDef
 func init() {
+ // keys must be sorted alphabetically. but functions with aliases can go together, in which case they are sorted by the first of their aliases
 funcs = map[string]funcDef{
- "alias": {NewAlias, true},
- "sum": {NewSumSeries, true},
- "sumSeries": {NewSumSeries, true},
- "avg": {NewAvgSeries, true},
- "averageSeries": {NewAvgSeries, true},
- "movingAverage": {NewMovingAverage, false},
- "consolidateBy": {NewConsolidateBy, true},
+ "alias": {NewAlias, true},
+ "aliasByNode": {NewAliasByNode, true},
+ "avg": {NewAvgSeries, true},
+ "averageSeries": {NewAvgSeries, true},
+ "consolidateBy": {NewConsolidateBy, true},
+ "movingAverage": {NewMovingAverage, false},
+ "perSecond": {NewPerSecond, true},
+ "smartSummarize": {NewSmartSummarize, false},
+ "sum": {NewSumSeries, true},
+ "sumSeries": {NewSumSeries, true},
+ "transformNull": {NewTransformNull, true},
 }
 }
diff --git a/expr/parse.go b/expr/parse.go
index cdb11b87b3..9b18bb3707 100644
--- a/expr/parse.go
+++ b/expr/parse.go
@@ -5,13 +5,19 @@ import (
 "errors"
 "fmt"
 "reflect"
 "strconv"
+ "strings"
 )
 var (
- ErrMissingArg = errors.New("argument missing")
- ErrTooManyArg = errors.New("too many arguments")
- ErrMissingTimeseries = errors.New("missing time series argument")
- ErrWildcardNotAllowed = errors.New("found wildcard where series expected")
+ ErrMissingArg = errors.New("argument missing")
+ ErrTooManyArg = errors.New("too many arguments")
+ ErrMissingTimeseries = errors.New("missing time series argument")
+ ErrWildcardNotAllowed = errors.New("found wildcard where series expected")
+ ErrMissingExpr = errors.New("missing expression")
+ ErrMissingComma = errors.New("missing comma")
+ ErrMissingQuote = errors.New("missing quote")
+ ErrUnexpectedCharacter = errors.New("unexpected character")
+ ErrIllegalCharacter = errors.New("illegal character for function name")
 )
 type ErrBadArgument struct {
@@ -38,6 +44,32 @@ func (e ErrUnknownFunction) Error() string {
 return fmt.Sprintf("unknown function %q", string(e))
 }
+type ErrUnknownKwarg struct {
+ key string
+}
+
+func (e ErrUnknownKwarg) Error() string {
+ return fmt.Sprintf("unknown keyword argument %q", e.key)
+}
+
+type ErrBadKwarg struct {
+ key string
+ exp Arg
+ got exprType
+}
+
+func (e ErrBadKwarg) Error() string {
+ return fmt.Sprintf("keyword argument %q bad type.
expected %T - got %s", e.key, e.exp, e.got) +} + +type ErrKwargSpecifiedTwice struct { + key string +} + +func (e ErrKwargSpecifiedTwice) Error() string { + return fmt.Sprintf("keyword argument %q specified twice", e.key) +} + type MetricRequest struct { Metric string From int32 @@ -61,6 +93,8 @@ func ParseMany(targets []string) ([]*expr, error) { return out, nil } +// Parses an expression string and turns it into an expression +// also returns any leftover data that could not be parsed func Parse(e string) (*expr, string, error) { // skip whitespace for len(e) > 1 && e[0] == ' ' { @@ -72,8 +106,15 @@ func Parse(e string) (*expr, string, error) { } if '0' <= e[0] && e[0] <= '9' || e[0] == '-' || e[0] == '+' { - val, valStr, e, err := parseConst(e) - return &expr{float: val, str: valStr, etype: etConst}, e, err + return parseConst(e) + } + + if strings.HasPrefix(e, "True") || strings.HasPrefix(e, "true") { + return &expr{bool: true, str: e[:4], etype: etBool}, e[4:], nil + } + + if strings.HasPrefix(e, "False") || strings.HasPrefix(e, "false") { + return &expr{bool: false, str: e[:5], etype: etBool}, e[5:], nil } if e[0] == '\'' || e[0] == '"' { @@ -88,10 +129,16 @@ func Parse(e string) (*expr, string, error) { } if e != "" && e[0] == '(' { + for i := range name { + if !isFnChar(name[i]) { + return nil, "", ErrIllegalCharacter + } + } + exp := &expr{str: name, etype: etFunc} - argString, posArgs, namedArgs, e, err := parseArgList(e) - exp.argsStr = argString + ArgString, posArgs, namedArgs, e, err := parseArgList(e) + exp.argsStr = ArgString exp.args = posArgs exp.namedArgs = namedArgs @@ -101,17 +148,6 @@ func Parse(e string) (*expr, string, error) { return &expr{str: name, etype: etName}, e, nil } -var ( - // ErrMissingExpr is a parse error returned when an expression is missing. - ErrMissingExpr = errors.New("missing expression") - // ErrMissingComma is a parse error returned when an expression is missing a comma. - ErrMissingComma = errors.New("missing comma") - // ErrMissingQuote is a parse error returned when an expression is missing a quote. - ErrMissingQuote = errors.New("missing quote") - // ErrUnexpectedCharacter is a parse error returned when an expression contains an unexpected character. 
- ErrUnexpectedCharacter = errors.New("unexpected character")
-)
-
 func parseArgList(e string) (string, []*expr, map[string]*expr, string, error) {
 var (
@@ -123,7 +159,7 @@ func parseArgList(e string) (string, []*expr, map[string]*expr, string, error) {
 panic("arg list should start with paren")
 }
- argString := e[1:]
+ ArgString := e[1:]
 e = e[1:]
@@ -151,19 +187,15 @@ func parseArgList(e string) (string, []*expr, map[string]*expr, string, error) {
 return "", nil, nil, "", ErrMissingComma
 }
- if argCont.etype != etConst && argCont.etype != etName && argCont.etype != etString {
- return "", nil, nil, eCont, ErrBadArgumentStr{"const, name or string", string(argCont.etype)}
+ if argCont.etype != etInt && argCont.etype != etFloat && argCont.etype != etName && argCont.etype != etString && argCont.etype != etBool {
+ return "", nil, nil, eCont, ErrBadArgumentStr{"int, float, name, bool or string", string(argCont.etype)}
 }
 if namedArgs == nil {
 namedArgs = make(map[string]*expr)
 }
- namedArgs[arg.str] = &expr{
- etype: argCont.etype,
- float: argCont.float,
- str: argCont.str,
- }
+ namedArgs[arg.str] = argCont
 e = eCont
 } else {
@@ -176,7 +208,7 @@ func parseArgList(e string) (string, []*expr, map[string]*expr, string, error) {
 }
 if e[0] == ')' {
- return argString[:len(argString)-len(e)], posArgs, namedArgs, e[1:], nil
+ return ArgString[:len(ArgString)-len(e)], posArgs, namedArgs, e[1:], nil
 }
 if e[0] != ',' && e[0] != ' ' {
@@ -197,25 +229,39 @@ func isNameChar(r byte) bool {
 r == '<' || r == '>'
 }
+func isFnChar(r byte) bool {
+ return false ||
+ 'a' <= r && r <= 'z' ||
+ 'A' <= r && r <= 'Z' ||
+ '0' <= r && r <= '9'
+}
+
 func isDigit(r byte) bool {
 return '0' <= r && r <= '9'
 }
-func parseConst(s string) (float64, string, string, error) {
+func parseConst(s string) (*expr, string, error) {
 var i int
+ var float bool
 // All valid characters for a floating-point constant
 // Just slurp them all in and let ParseFloat sort 'em out
 for i < len(s) && (isDigit(s[i]) || s[i] == '.' || s[i] == '+' || s[i] == '-' || s[i] == 'e' || s[i] == 'E') {
+ // note that exponent syntax results in a float value.
+ // so even values like 1e3 (1000) or 2000e-3 (2) which can be expressed as integers,
+ // are considered floating point values. if a function expects an int, then just don't use 'e' syntax.
+ if s[i] == '.'
|| s[i] == 'e' || s[i] == 'E' { + float = true + } i++ } - v, err := strconv.ParseFloat(s[:i], 64) - if err != nil { - return 0, "", "", err + if float { + v, err := strconv.ParseFloat(s[:i], 64) + return &expr{float: v, str: s[:i], etype: etFloat}, s[i:], err } - - return v, s[:i], s[i:], err + v, err := strconv.ParseInt(s[:i], 10, 64) + return &expr{int: v, str: s[:i], etype: etInt}, s[i:], err } func parseName(s string) (string, string) { @@ -274,216 +320,29 @@ func parseString(s string) (string, string, error) { return "", "", ErrMissingQuote } - //fmt.Println("> string", s[:i]) return s[:i], s[i+1:], nil } -func getStringArg(e *expr, n int) (string, error) { - if len(e.args) <= n { - return "", ErrMissingArg - } - - return doGetStringArg(e.args[n]) -} - -func getStringArgDefault(e *expr, n int, s string) (string, error) { - if len(e.args) <= n { - return s, nil - } - - return doGetStringArg(e.args[n]) -} - -func getStringNamedOrPosArgDefault(e *expr, k string, n int, s string) (string, error) { - if a := getNamedArg(e, k); a != nil { - return doGetStringArg(a) - } - - return getStringArgDefault(e, n, s) -} - -func doGetStringArg(e *expr) (string, error) { - if e.etype != etString { - return "", ErrBadArgumentStr{"string", string(e.etype)} - } - - return e.str, nil -} - -/* -func getIntervalArg(e *expr, n int, defaultSign int) (int32, error) { - if len(e.args) <= n { - return 0, ErrMissingArg - } - - if e.args[n].etype != etString { - return 0, ErrBadArgumentStr{"string", string(e.etype)} - } - - seconds, err := IntervalString(e.args[n].valStr, defaultSign) - if err != nil { - return 0, ErrBadArgumentStr{"const", string(e.etype)} - } - - return seconds, nil -} -*/ - -func getFloatArg(e *expr, n int) (float64, error) { - if len(e.args) <= n { - return 0, ErrMissingArg - } - - return doGetFloatArg(e.args[n]) -} - -func getFloatArgDefault(e *expr, n int, v float64) (float64, error) { - if len(e.args) <= n { - return v, nil - } - - return doGetFloatArg(e.args[n]) -} - -func getFloatNamedOrPosArgDefault(e *expr, k string, n int, v float64) (float64, error) { - if a := getNamedArg(e, k); a != nil { - return doGetFloatArg(a) - } - - return getFloatArgDefault(e, n, v) -} - -func doGetFloatArg(e *expr) (float64, error) { - if e.etype != etConst { - return 0, ErrBadArgumentStr{"const", string(e.etype)} - } - - return e.float, nil -} - -func getIntArg(e *expr, n int) (int, error) { - if len(e.args) <= n { - return 0, ErrMissingArg - } - - return doGetIntArg(e.args[n]) -} - -func getIntArgs(e *expr, n int) ([]int, error) { - - if len(e.args) <= n { - return nil, ErrMissingArg - } - - var ints []int - - for i := n; i < len(e.args); i++ { - a, err := getIntArg(e, i) - if err != nil { - return nil, err - } - ints = append(ints, a) - } - - return ints, nil -} - -func getIntArgDefault(e *expr, n int, d int) (int, error) { - if len(e.args) <= n { - return d, nil - } - - return doGetIntArg(e.args[n]) -} - -func getIntNamedOrPosArgDefault(e *expr, k string, n int, d int) (int, error) { - if a := getNamedArg(e, k); a != nil { - return doGetIntArg(a) - } - - return getIntArgDefault(e, n, d) -} - -func doGetIntArg(e *expr) (int, error) { - if e.etype != etConst { - return 0, ErrBadArgumentStr{"const", string(e.etype)} - } - - return int(e.float), nil -} - -func getBoolNamedOrPosArgDefault(e *expr, k string, n int, b bool) (bool, error) { - if a := getNamedArg(e, k); a != nil { - return doGetBoolArg(a) - } - - return getBoolArgDefault(e, n, b) -} - -func getBoolArgDefault(e *expr, n int, b bool) (bool, error) 
{ - if len(e.args) <= n { - return b, nil - } - - return doGetBoolArg(e.args[n]) -} - -func doGetBoolArg(e *expr) (bool, error) { - if e.etype != etName { - return false, ErrBadArgumentStr{"const", string(e.etype)} - } - - // names go into 'target' - switch e.str { - case "False", "false": - return false, nil - case "True", "true": - return true, nil - } - - return false, ErrBadArgumentStr{"const", string(e.etype)} -} - -/* -func getSeriesArg(arg *expr, from, until int32, values map[MetricRequest][]*MetricData) ([]*MetricData, error) { - if arg.etype != etName && arg.etype != etFunc { - return nil, ErrMissingTimeseries - } - - a, _ := EvalExpr(arg, from, until, values) - - if len(a) == 0 { - return nil, ErrSeriesDoesNotExist - } - - return a, nil -} - -func getSeriesArgs(e []*expr, from, until int32, values map[MetricRequest][]*MetricData) ([]*MetricData, error) { - - var args []*MetricData - - for _, arg := range e { - a, err := getSeriesArg(arg, from, until, values) - if err != nil { - return nil, err +// exctractMetric searches for a metric name in `m' +// metric name is defined to be a series of name characters terminated by a comma +func extractMetric(m string) string { + start := 0 + end := 0 + curlyBraces := 0 + for end < len(m) { + if m[end] == '{' { + curlyBraces++ + } else if m[end] == '}' { + curlyBraces-- + } else if m[end] == ')' || (m[end] == ',' && curlyBraces == 0) { + return m[start:end] + } else if !(isNameChar(m[end]) || m[end] == ',') { + start = end + 1 } - args = append(args, a...) - } - - if len(args) == 0 { - return nil, ErrSeriesDoesNotExist - } - - return args, nil -} -*/ -func getNamedArg(e *expr, name string) *expr { - if a, ok := e.namedArgs[name]; ok { - return a + end++ } - return nil + return m[start:end] } diff --git a/expr/parse_test.go b/expr/parse_test.go index e877ed58f8..b4bf6d7935 100644 --- a/expr/parse_test.go +++ b/expr/parse_test.go @@ -5,23 +5,28 @@ import ( "testing" "github.com/davecgh/go-spew/spew" + "github.com/sergi/go-diff/diffmatchpatch" ) func TestParse(t *testing.T) { tests := []struct { - s string - e *expr + s string + e *expr + err error }{ {"metric", &expr{str: "metric"}, + nil, }, { "metric.foo", &expr{str: "metric.foo"}, + nil, }, {"metric.*.foo", &expr{str: "metric.*.foo"}, + nil, }, { "func(metric)", @@ -31,6 +36,7 @@ func TestParse(t *testing.T) { args: []*expr{{str: "metric"}}, argsStr: "metric", }, + nil, }, { "func(metric1,metric2,metric3)", @@ -43,6 +49,7 @@ func TestParse(t *testing.T) { {str: "metric3"}}, argsStr: "metric1,metric2,metric3", }, + nil, }, { "func1(metric1,func2(metricA, metricB),metric3)", @@ -59,15 +66,18 @@ func TestParse(t *testing.T) { {str: "metric3"}}, argsStr: "metric1,func2(metricA, metricB),metric3", }, + nil, }, { "3", - &expr{float: 3, str: "3", etype: etConst}, + &expr{int: 3, str: "3", etype: etInt}, + nil, }, { "3.1", - &expr{float: 3.1, str: "3.1", etype: etConst}, + &expr{float: 3.1, str: "3.1", etype: etFloat}, + nil, }, { "func1(metric1, 3, 1e2, 2e-3)", @@ -76,12 +86,13 @@ func TestParse(t *testing.T) { etype: etFunc, args: []*expr{ {str: "metric1"}, - {float: 3, str: "3", etype: etConst}, - {float: 100, str: "1e2", etype: etConst}, - {float: 0.002, str: "2e-3", etype: etConst}, + {int: 3, str: "3", etype: etInt}, + {float: 100, str: "1e2", etype: etFloat}, + {float: 0.002, str: "2e-3", etype: etFloat}, }, argsStr: "metric1, 3, 1e2, 2e-3", }, + nil, }, { "func1(metric1, 'stringconst')", @@ -94,6 +105,7 @@ func TestParse(t *testing.T) { }, argsStr: "metric1, 'stringconst'", }, + nil, }, { 
`func1(metric1, "stringconst")`, @@ -106,6 +118,7 @@ func TestParse(t *testing.T) { }, argsStr: `metric1, "stringconst"`, }, + nil, }, { "func1(metric1, -3)", @@ -114,10 +127,11 @@ func TestParse(t *testing.T) { etype: etFunc, args: []*expr{ {str: "metric1"}, - {float: -3, str: "-3", etype: etConst}, + {int: -3, str: "-3", etype: etInt}, }, argsStr: "metric1, -3", }, + nil, }, { @@ -127,11 +141,12 @@ func TestParse(t *testing.T) { etype: etFunc, args: []*expr{ {str: "metric1"}, - {float: -3, str: "-3", etype: etConst}, + {int: -3, str: "-3", etype: etInt}, {str: "foo", etype: etString}, }, argsStr: "metric1, -3 , 'foo' ", }, + nil, }, { @@ -147,6 +162,7 @@ func TestParse(t *testing.T) { }, argsStr: "metric, key='value'", }, + nil, }, { "func(metric, key=true)", @@ -157,10 +173,24 @@ func TestParse(t *testing.T) { {str: "metric"}, }, namedArgs: map[string]*expr{ - "key": {etype: etName, str: "true"}, + "key": {etype: etBool, str: "true", bool: true}, }, argsStr: "metric, key=true", }, + nil, + }, + { + "func(metric, False)", + &expr{ + str: "func", + etype: etFunc, + args: []*expr{ + {str: "metric"}, + {etype: etBool, str: "False", bool: false}, + }, + argsStr: "metric, False", + }, + nil, }, { "func(metric, key=1)", @@ -171,10 +201,11 @@ func TestParse(t *testing.T) { {str: "metric"}, }, namedArgs: map[string]*expr{ - "key": {etype: etConst, str: "1", float: 1}, + "key": {etype: etInt, str: "1", int: 1}, }, argsStr: "metric, key=1", }, + nil, }, { "func(metric, key=0.1)", @@ -185,10 +216,11 @@ func TestParse(t *testing.T) { {str: "metric"}, }, namedArgs: map[string]*expr{ - "key": {etype: etConst, str: "0.1", float: 0.1}, + "key": {etype: etFloat, str: "0.1", float: 0.1}, }, argsStr: "metric, key=0.1", }, + nil, }, { @@ -198,13 +230,14 @@ func TestParse(t *testing.T) { etype: etFunc, args: []*expr{ {str: "metric"}, - {etype: etConst, str: "1", float: 1}, + {etype: etInt, str: "1", int: 1}, }, namedArgs: map[string]*expr{ "key": {etype: etString, str: "value"}, }, argsStr: "metric, 1, key='value'", }, + nil, }, { "func(metric, key='value', 1)", @@ -213,13 +246,14 @@ func TestParse(t *testing.T) { etype: etFunc, args: []*expr{ {str: "metric"}, - {etype: etConst, str: "1", float: 1}, + {etype: etInt, str: "1", int: 1}, }, namedArgs: map[string]*expr{ "key": {etype: etString, str: "value"}, }, argsStr: "metric, key='value', 1", }, + nil, }, { "func(metric, key1='value1', key2='value two is here')", @@ -235,6 +269,7 @@ func TestParse(t *testing.T) { }, argsStr: "metric, key1='value1', key2='value two is here'", }, + nil, }, { "func(metric, key2='value2', key1='value1')", @@ -250,6 +285,7 @@ func TestParse(t *testing.T) { }, argsStr: "metric, key2='value2', key1='value1'", }, + nil, }, { @@ -258,6 +294,7 @@ func TestParse(t *testing.T) { str: "foo.{bar,baz}.qux", etype: etName, }, + nil, }, { `foo.b[0-9].qux`, @@ -265,6 +302,7 @@ func TestParse(t *testing.T) { str: "foo.b[0-9].qux", etype: etName, }, + nil, }, { `virt.v1.*.text-match:`, @@ -272,17 +310,91 @@ func TestParse(t *testing.T) { str: "virt.v1.*.text-match:", etype: etName, }, + nil, + }, + { + `foo.()`, + nil, + ErrIllegalCharacter, + }, + { + `foo.*()`, + nil, + ErrIllegalCharacter, + }, + { + `foo.{bar,baz}.qux()`, + nil, + ErrIllegalCharacter, }, } for _, tt := range tests { e, _, err := Parse(tt.s) - if err != nil { - t.Errorf("parse for %+v failed: err=%v", tt.s, err) + if err != tt.err { + t.Errorf("case %+v expected err %v, got %v", tt.s, tt.err, err) continue } if !reflect.DeepEqual(e, tt.e) { - t.Errorf("parse for %+v 
failed:\ngot %+s\nwant %+v", tt.s, spew.Sdump(e), spew.Sdump(tt.e)) + spew.Config.DisablePointerAddresses = true + exp := spew.Sdump(tt.e) + got := spew.Sdump(e) + spew.Config.DisablePointerAddresses = false + dmp := diffmatchpatch.New() + diffs := dmp.DiffMain(exp, got, false) + format := `##### case %+v ##### +### expected ### +%+v + +### got ### +%s + +###diff ### +%s` + t.Errorf(format, tt.s, exp, got, dmp.DiffPrettyText(diffs)) + } + + } +} + +func TestExtractMetric(t *testing.T) { + var tests = []struct { + in string + out string + }{ + { + "foo", + "foo", + }, + { + "perSecond(foo)", + "foo", + }, + { + "foo.bar", + "foo.bar", + }, + { + "perSecond(foo.bar", + "foo.bar", + }, + { + "movingAverage(foo.bar,10)", + "foo.bar", + }, + { + "scale(scaleToSeconds(nonNegativeDerivative(foo.bar),60),60)", + "foo.bar", + }, + { + "divideSeries(foo.bar,baz.quux)", + "foo.bar", + }, + } + + for _, tt := range tests { + if m := extractMetric(tt.in); m != tt.out { + t.Errorf("extractMetric(%q)=%q, want %q", tt.in, m, tt.out) } } } diff --git a/expr/plan.go b/expr/plan.go index 377bf86dd5..5937663104 100644 --- a/expr/plan.go +++ b/expr/plan.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "io" + "sort" "github.com/raintank/metrictank/api/models" ) @@ -15,16 +16,16 @@ type Req struct { } type Plan struct { - Reqs []Req + Reqs []Req // data that needs to be fetched before functions can be executed + funcs []GraphiteFunc // top-level funcs to execute, the head of each tree for each target exprs []*expr MaxDataPoints uint32 From uint32 // global request scoped from To uint32 // global request scoped to - input map[Req][]models.Series // input data to work with. set via Run() + data map[Req][]models.Series // input data to work with. set via Run(), as well as // new data generated by processing funcs. useful for two reasons: - // 1) reuse partial calculations e.g. queries like target=movingAvg(sum(foo), 10)&target=sum(foo)A (TODO) + // 1) reuse partial calculations e.g. queries like target=movingAvg(sum(foo), 10)&target=sum(foo) (TODO) // 2) central place to return data back to pool when we're done. 
- generated map[Req][]models.Series } func (p Plan) Dump(w io.Writer) { @@ -52,196 +53,163 @@ func (p Plan) Dump(w io.Writer) { // * future version: allow functions to mark safe to pre-aggregate using consolidateBy or not func NewPlan(exprs []*expr, from, to, mdp uint32, stable bool, reqs []Req) (Plan, error) { var err error + var funcs []GraphiteFunc for _, e := range exprs { - reqs, err = newplan(e, from, to, stable, reqs) + var fn GraphiteFunc + fn, reqs, err = newplan(e, from, to, stable, reqs) if err != nil { return Plan{}, err } + funcs = append(funcs, fn) } return Plan{ Reqs: reqs, exprs: exprs, + funcs: funcs, MaxDataPoints: mdp, From: from, To: to, }, nil } -func newplan(e *expr, from, to uint32, stable bool, reqs []Req) ([]Req, error) { +// newplan adds requests as needed for the given expr, resolving function calls as needed +func newplan(e *expr, from, to uint32, stable bool, reqs []Req) (GraphiteFunc, []Req, error) { if e.etype != etFunc && e.etype != etName { - return nil, errors.New("request must be a function call or metric pattern") + return nil, nil, errors.New("request must be a function call or metric pattern") } if e.etype == etName { - reqs = append(reqs, Req{ + req := Req{ e.str, from, to, - }) - return reqs, nil + } + reqs = append(reqs, req) + return NewGet(req), reqs, nil } // here e.type is guaranteed to be etFunc fdef, ok := funcs[e.str] if !ok { - return nil, ErrUnknownFunction(e.str) + return nil, nil, ErrUnknownFunction(e.str) } if stable && !fdef.stable { - return nil, ErrUnknownFunction(e.str) + return nil, nil, ErrUnknownFunction(e.str) } - // now comes the interesting task of validating the arguments as specified by the function, + fn := fdef.constr() + reqs, err := newplanFunc(e, fn, from, to, stable, reqs) + return fn, reqs, err +} + +// newplanFunc adds requests as needed for the given expr, and validates the function input +// provided you already know the expression is a function call to the given function +func newplanFunc(e *expr, fn GraphiteFunc, from, to uint32, stable bool, reqs []Req) ([]Req, error) { + // first comes the interesting task of validating the arguments as specified by the function, // against the arguments that were parsed. - fn := fdef.constr() argsExp, _ := fn.Signature() + var err error - // note that signature may have seriesLists in it, which means one or more args of type seriesList - // so it's legal to have more e.args then (signature) args in that case. - if len(e.args) < len(argsExp) { - return nil, ErrMissingArg - } - // j tracks pos in e.args of next given arg to process - j := 0 - for _, argExp := range argsExp { - // we can't do extensive, accurate validation of the type here because what the output from a function we depend on - // might be dynamically typed. e.g. movingAvg returns 1..N series depending on how many it got as input - if len(e.args) <= j { + // note: + // * signature may have seriesLists in it, which means one or more args of type seriesList + // so it's legal to have more e.args than signature args in that case. + // * we can't do extensive, accurate validation of the type here because what the output from a function we depend on + // might be dynamically typed. e.g. 
movingAvg returns 1..N series depending on how many it got as input + + // first validate the mandatory args + pos := 0 // pos in args of next given arg to process + cutoff := 0 // marks the index of the first optional point (if any) + var argExp Arg + for cutoff, argExp = range argsExp { + if argExp.Optional() { + break + } + if len(e.args) <= pos { return nil, ErrMissingArg } - argGot := e.args[j] - switch argExp { - case series: - if argGot.etype != etName && argGot.etype != etFunc { - return nil, ErrBadArgumentStr{"func or name", string(argGot.etype)} - } - case seriesList: - if argGot.etype != etName && argGot.etype != etFunc { - return nil, ErrBadArgumentStr{"func or name", string(argGot.etype)} - } - case seriesLists: - if argGot.etype != etName && argGot.etype != etFunc { - return nil, ErrBadArgumentStr{"func or name", string(argGot.etype)} - } - // special case! consume all subsequent args (if any) in e.args that will also yield a seriesList - for len(e.args) > j+1 && (e.args[j+1].etype == etName || e.args[j+1].etype == etFunc) { - j += 1 - } - case integer: - if argGot.etype != etConst { - return nil, ErrBadArgumentStr{"int", string(argGot.etype)} - } - case float: - if argGot.etype != etConst { - return nil, ErrBadArgumentStr{"float", string(argGot.etype)} - } - case str: - if argGot.etype != etString { - return nil, ErrBadArgumentStr{"string", string(argGot.etype)} - } + pos, err = e.consumeBasicArg(pos, argExp) + if err != nil { + return nil, err + } + } + if !argExp.Optional() { + cutoff += 1 + } + + // we stopped iterating the mandatory args. + // any remaining args should be due to optional args otherwise there's too many + // we also track here which keywords can also be used for the given optional args + // so that those args should not be specified via their keys anymore. + + seenKwargs := make(map[string]struct{}) + for _, argOpt := range argsExp[cutoff:] { + if len(e.args) <= pos { + break // no more args specified. we're done. 
} - j += 1 + pos, err = e.consumeBasicArg(pos, argOpt) + if err != nil { + return nil, err + } + seenKwargs[argOpt.Key()] = struct{}{} } - // when we stop iterating, j should be a non-existent pos - if len(e.args) > j { + if len(e.args) > pos { return nil, ErrTooManyArg } - err := fn.Init(e.args) - if err != nil { - return nil, err + + // for any provided keyword args, verify that they are what the function stipulated + // and that they have not already been specified via their position + for key := range e.namedArgs { + _, ok := seenKwargs[key] + if ok { + return nil, ErrKwargSpecifiedTwice{key} + } + err = e.consumeKwarg(key, argsExp[cutoff:]) + if err != nil { + return nil, err + } + seenKwargs[key] = struct{}{} } + + // functions now have their non-series input args set, + // so they should now be able to declare the timerange they need from, to = fn.NeedRange(from, to) - // look at which arguments are requested - // if the args are series, they are to be requested with the potentially extended to/from - // if they are not, keep traversing the tree until we find out which metrics to fetch and for which durations - for _, arg := range e.args { - if arg.etype == etName || arg.etype == etFunc { - reqs, err = newplan(arg, from, to, stable, reqs) + // now that we know the needed timerange for the data coming into + // this function, we can set up the input arguments for the function + // that are series + pos = 0 + for _, argExp = range argsExp[:cutoff] { + switch argExp.(type) { + case ArgSeries, ArgSeriesList, ArgSeriesLists: + pos, reqs, err = e.consumeSeriesArg(pos, argExp, from, to, stable, reqs) if err != nil { return nil, err } + default: + return reqs, err } } - return reqs, nil + return reqs, err } // Run invokes all processing as specified in the plan (expressions, from/to) with the input as input func (p Plan) Run(input map[Req][]models.Series) ([]models.Series, error) { var out []models.Series - p.input = input - p.generated = make(map[Req][]models.Series) - for _, expr := range p.exprs { - o, err := p.run(p.From, p.To, expr) + p.data = input + for _, fn := range p.funcs { + series, err := fn.Exec(p.data) if err != nil { return nil, err } - out = append(out, o...) + sort.Sort(models.SeriesByTarget(series)) + out = append(out, series...) } return out, nil } -func (p Plan) run(from, to uint32, e *expr) ([]models.Series, error) { - if e.etype != etFunc && e.etype != etName { - panic("this should never happen. request must be a function call or metric pattern") - } - if e.etype == etName { - req := Req{ - e.str, - from, - to, - } - return p.input[req], nil - } - - // here e.type is guaranteed to be etFunc - fdef, ok := funcs[e.str] - if !ok { - panic(fmt.Sprintf("cannot find func %q. 
this should never happen as we should have validated function existence earlier", e.str)) - } - fn := fdef.constr() - err := fn.Init(e.args) - if err != nil { - return nil, err - } - from, to = fn.NeedRange(from, to) - // look at which arguments are requested - // if the args are series, they are to be requested with the potentially extended to/from - // if they are not, keep traversing the tree until we find out which metrics to fetch and for which durations - results := make([]interface{}, len(e.args)) - for i, arg := range e.args { - if arg.etype == etName || arg.etype == etFunc { - result, err := p.run(from, to, arg) - if err != nil { - return nil, err - } - results[i] = result - } else if arg.etype == etString { - results[i] = arg.str - } else { - // etype == etConst - results[i] = arg.float - } - } - // we now have all our args and can process the data and return - rets, err := fn.Exec(p.generated, results...) - if err != nil { - return nil, err - } - series := make([]models.Series, len(rets)) - for i, ret := range rets { - series[i] = ret.(models.Series) - } - return series, nil -} - // Clean returns all buffers (all input data + generated series along the way) // back to the pool. func (p Plan) Clean() { - for _, series := range p.input { - for _, serie := range series { - pointSlicePool.Put(serie.Datapoints[:0]) - } - } - for _, series := range p.generated { + for _, series := range p.data { for _, serie := range series { pointSlicePool.Put(serie.Datapoints[:0]) } diff --git a/expr/plan_test.go b/expr/plan_test.go new file mode 100644 index 0000000000..bc9710a527 --- /dev/null +++ b/expr/plan_test.go @@ -0,0 +1,162 @@ +package expr + +import ( + "reflect" + "testing" +) + +// here we use smartSummarize because it has multiple optional arguments which allows us to test some interesting things +func TestNewPlan(t *testing.T) { + + from := uint32(1000) + to := uint32(2000) + stable := true + + cases := []struct { + name string + args []*expr + namedArgs map[string]*expr + expReq []Req + expErr error + }{ + { + "2 args normal, 0 optional", + []*expr{ + {etype: etName, str: "foo.bar.*"}, + {etype: etString, str: "1hour"}, + }, + nil, + []Req{ + {"foo.bar.*", from, to}, + }, + nil, + }, + { + "2 args normal, 2 optional by position", + []*expr{ + {etype: etName, str: "foo.bar.*"}, + {etype: etString, str: "1hour"}, + {etype: etString, str: "sum"}, + {etype: etBool, bool: true}, + }, + nil, + []Req{ + {"foo.bar.*", from, to}, + }, + nil, + }, + { + "2 args normal, 2 optional by key", + []*expr{ + {etype: etName, str: "foo.bar.*"}, + {etype: etString, str: "1hour"}, + }, + map[string]*expr{ + "func": {etype: etString, str: "sum"}, + "alignToFrom": {etype: etBool, bool: true}, + }, + []Req{ + {"foo.bar.*", from, to}, + }, + nil, + }, + { + "2 args normal, 1 by position, 1 by keyword", + []*expr{ + {etype: etName, str: "foo.bar.*"}, + {etype: etString, str: "1hour"}, + {etype: etString, str: "sum"}, + }, + map[string]*expr{ + "alignToFrom": {etype: etBool, bool: true}, + }, + []Req{ + {"foo.bar.*", from, to}, + }, + nil, + }, + { + "2 args normal, 2 by position, 2 by keyword (duplicate!)", + []*expr{ + {etype: etName, str: "foo.bar.*"}, + {etype: etString, str: "1hour"}, + {etype: etString, str: "sum"}, + {etype: etBool, bool: true}, + }, + map[string]*expr{ + "func": {etype: etString, str: "sum"}, + "alignToFrom": {etype: etBool, bool: true}, + }, + nil, + ErrKwargSpecifiedTwice{"func"}, + }, + { + "2 args normal, 1 by position, 2 by keyword (duplicate!)", + []*expr{ + {etype: etName, str: 
"foo.bar.*"}, + {etype: etString, str: "1hour"}, + {etype: etString, str: "sum"}, + }, + map[string]*expr{ + "func": {etype: etString, str: "sum"}, + "alignToFrom": {etype: etBool, bool: true}, + }, + nil, + ErrKwargSpecifiedTwice{"func"}, + }, + { + "2 args normal, 0 by position, the first by keyword", + []*expr{ + {etype: etName, str: "foo.bar.*"}, + {etype: etString, str: "1hour"}, + }, + map[string]*expr{ + "func": {etype: etString, str: "sum"}, + }, + []Req{ + {"foo.bar.*", from, to}, + }, + nil, + }, + { + "2 args normal, 0 by position, the second by keyword", + []*expr{ + {etype: etName, str: "foo.bar.*"}, + {etype: etString, str: "1hour"}, + }, + map[string]*expr{ + "alignToFrom": {etype: etBool, bool: true}, + }, + []Req{ + {"foo.bar.*", from, to}, + }, + nil, + }, + { + "missing required argument", + []*expr{ + {etype: etName, str: "foo.bar.*"}, + }, + nil, + nil, + ErrMissingArg, + }, + } + + fn := NewSmartSummarize() + for i, c := range cases { + e := &expr{ + etype: etFunc, + str: "smartSummarize", + args: c.args, + namedArgs: c.namedArgs, + } + req, err := newplanFunc(e, fn, from, to, stable, nil) + if !reflect.DeepEqual(err, c.expErr) { + t.Errorf("case %d: %q, expected error %v - got %v", i, c.name, c.expErr, err) + } + if !reflect.DeepEqual(req, c.expReq) { + t.Errorf("case %d: %q, expected req %v - got %v", i, c.name, c.expReq, req) + } + } +} diff --git a/expr/test.go b/expr/test.go index d1a2c0db8e..1d80beaf32 100644 --- a/expr/test.go +++ b/expr/test.go @@ -1,3 +1,5 @@ package expr -var results []interface{} +import "github.com/raintank/metrictank/api/models" + +var results []models.Series diff --git a/expr/types.go b/expr/types.go new file mode 100644 index 0000000000..378f0956d9 --- /dev/null +++ b/expr/types.go @@ -0,0 +1,100 @@ +// argument types. to let functions describe their inputs and outputs +package expr + +// Arg is an argument to a GraphiteFunc +// note how every implementation has a val property. +// this property should point to value accessible to the function. +// the value will be set up by the planner; it assures that +// by the time Func.Exec() is called, the function has access to all +// needed inputs, whether simple values, or in the case of ArgSeries* +// inputs other functions to call which will feed it data. +type Arg interface { + Key() string + Optional() bool +} + +// ArgSeries is a single series argument +// not generally used as input since graphite functions typically take multiple series as input +// but is useful to describe output +type ArgSeries struct { + key string + opt bool + val *GraphiteFunc +} + +func (a ArgSeries) Key() string { return a.key } +func (a ArgSeries) Optional() bool { return a.opt } + +// ArgSeriesList is a list of series argument, it can be 0..N series +type ArgSeriesList struct { + key string + opt bool + val *GraphiteFunc +} + +func (a ArgSeriesList) Key() string { return a.key } +func (a ArgSeriesList) Optional() bool { return a.opt } + +// ArgSeriesLists represents one or more lists of series inputs. 
+type ArgSeriesLists struct { + key string + opt bool + val *[]GraphiteFunc +} + +func (a ArgSeriesLists) Key() string { return a.key } +func (a ArgSeriesLists) Optional() bool { return a.opt } + +// ArgInt is a number without decimals +type ArgInt struct { + key string + opt bool + validator []Validator + val *int64 +} + +func (a ArgInt) Key() string { return a.key } +func (a ArgInt) Optional() bool { return a.opt } + +// ArgInts represents one or more numbers without decimals +type ArgInts struct { + key string + opt bool + validator []Validator + val *[]int64 +} + +func (a ArgInts) Key() string { return a.key } +func (a ArgInts) Optional() bool { return a.opt } + +// floating point number; potentially with decimals +type ArgFloat struct { + key string + opt bool + validator []Validator + val *float64 +} + +func (a ArgFloat) Key() string { return a.key } +func (a ArgFloat) Optional() bool { return a.opt } + +// string +type ArgString struct { + key string + opt bool + validator []Validator + val *string +} + +func (a ArgString) Key() string { return a.key } +func (a ArgString) Optional() bool { return a.opt } + +// True or False +type ArgBool struct { + key string + opt bool + val *bool +} + +func (a ArgBool) Key() string { return a.key } +func (a ArgBool) Optional() bool { return a.opt } diff --git a/expr/validator.go b/expr/validator.go new file mode 100644 index 0000000000..26a6baa27e --- /dev/null +++ b/expr/validator.go @@ -0,0 +1,15 @@ +package expr + +import "errors" + +var ErrIntPositive = errors.New("integer must be positive") + +// Validator is a function to validate an input +type Validator func(e *expr) error + +func IntPositive(e *expr) error { + if e.int < 1 { + return ErrIntPositive + } + return nil +} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE index 2a7cfd2bf6..c836416192 100644 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -1,4 +1,6 @@ -Copyright (c) 2012-2013 Dave Collins +ISC License + +Copyright (c) 2012-2016 Dave Collins Permission to use, copy, modify, and distribute this software for any purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go index a8d27a3f67..8a4a6589a2 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015 Dave Collins +// Copyright (c) 2015-2016 Dave Collins // // Permission to use, copy, modify, and distribute this software for any // purpose with or without fee is hereby granted, provided that the above @@ -13,9 +13,10 @@ // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. // NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine and "-tags disableunsafe" -// is not added to the go build command line. -// +build !appengine,!disableunsafe +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. 
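As an illustration of how the Arg types and the IntPositive validator introduced above (expr/types.go, expr/validator.go) are meant to work together with the planner's argument walk in newplanFunc: a function implementation describes its mandatory and optional inputs, gives optional ones a keyword, and hands the planner pointers into its own fields so that, by the time Exec runs, everything is filled in. exampleFunc and its args method below are hypothetical (the real functions expose this through Signature(), whose exact return shape is not shown in this hunk); the sketch only demonstrates the wiring.

// exampleFunc is a hypothetical function implementation inside package expr.
// It takes one series-list input, a mandatory positive integer, and two
// optional arguments that may be given positionally or as func=... / alignToFrom=...
type exampleFunc struct {
	in    GraphiteFunc // filled in by the planner with the function producing the input series
	n     int64
	fn    string
	align bool
}

// args describes the expected inputs; the real code would return this from Signature().
func (f *exampleFunc) args() []Arg {
	return []Arg{
		ArgSeriesList{val: &f.in},                                        // mandatory, positional only
		ArgInt{key: "n", validator: []Validator{IntPositive}, val: &f.n}, // mandatory, must be >= 1
		ArgString{key: "func", opt: true, val: &f.fn},                    // optional, by position or keyword
		ArgBool{key: "alignToFrom", opt: true, val: &f.align},            // optional, by position or keyword
	}
}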
+// +build !js,!appengine,!safe,!disableunsafe package spew @@ -91,6 +92,21 @@ func init() { flagKindShift = 0 flagRO = 1 << 5 flagIndir = 1 << 6 + + // Commit adf9b30e5594 modified the flags to separate the + // flagRO flag into two bits which specifies whether or not the + // field is embedded. This causes flagIndir to move over a bit + // and means that flagRO is the combination of either of the + // original flagRO bit and the new bit. + // + // This code detects the change by extracting what used to be + // the indirect bit to ensure it's set. When it's not, the flag + // order has been changed to the newer format, so the flags are + // updated accordingly. + if upfv&flagIndir == 0 { + flagRO = 3 << 5 + flagIndir = 1 << 7 + } } } diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go index 457e41235e..1fe3cf3d5d 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015 Dave Collins +// Copyright (c) 2015-2016 Dave Collins // // Permission to use, copy, modify, and distribute this software for any // purpose with or without fee is hereby granted, provided that the above @@ -13,9 +13,10 @@ // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. // NOTE: Due to the following build constraints, this file will only be compiled -// when either the code is running on Google App Engine or "-tags disableunsafe" -// is added to the go build command line. -// +build appengine disableunsafe +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go index 14f02dc15b..7c519ff47a 100644 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go index ee1ab07b3f..2e3d22f312 100644 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -64,9 +64,18 @@ type ConfigState struct { // inside these interface methods. As a result, this option relies on // access to the unsafe package, so it will not have any effect when // running in environments without access to the unsafe package such as - // Google App Engine or with the "disableunsafe" build tag specified. + // Google App Engine or with the "safe" build tag specified. DisablePointerMethods bool + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. 
+ DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + // ContinueOnMethod specifies whether or not recursion should continue once // a custom error or Stringer interface is invoked. The default, false, // means it will print the results of invoking the custom error or Stringer diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go index 5be0c40609..aacaac6f1e 100644 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -91,6 +91,15 @@ The following configuration options are available: which only accept pointer receivers from non-pointer variables. Pointer method invocation is enabled by default. + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. This is useful when + diffing data structures in tests. + * ContinueOnMethod Enables recursion into types after invoking error and Stringer interface methods. Recursion after method invocation is disabled by default. diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go index 36a2b6cc9b..df1d582a72 100644 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -129,7 +129,7 @@ func (d *dumpState) dumpPtr(v reflect.Value) { d.w.Write(closeParenBytes) // Display pointer information. - if len(pointerChain) > 0 { + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { d.w.Write(openParenBytes) for i, addr := range pointerChain { if i > 0 { @@ -181,8 +181,6 @@ func (d *dumpState) dumpSlice(v reflect.Value) { // Try to use existing uint8 slices and fall back to converting // and copying if that fails. case vt.Kind() == reflect.Uint8: - // TODO(davec): Fix up the disableUnsafe bits... - // We need an addressable interface to convert the type // to a byte slice. 
However, the reflect package won't // give us an interface on certain things like @@ -284,13 +282,13 @@ func (d *dumpState) dump(v reflect.Value) { case reflect.Map, reflect.String: valueLen = v.Len() } - if valueLen != 0 || valueCap != 0 { + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { d.w.Write(openParenBytes) if valueLen != 0 { d.w.Write(lenEqualsBytes) printInt(d.w, int64(valueLen), 10) } - if valueCap != 0 { + if !d.cs.DisableCapacities && valueCap != 0 { if valueLen != 0 { d.w.Write(spaceBytes) } diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go index ecf3b80e24..c49875bacb 100644 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go index d8233f542e..32c0e33882 100644 --- a/vendor/github.com/davecgh/go-spew/spew/spew.go +++ b/vendor/github.com/davecgh/go-spew/spew/spew.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/sergi/go-diff/LICENSE b/vendor/github.com/sergi/go-diff/LICENSE new file mode 100644 index 0000000000..937942c2b2 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2012-2016 The go-diff Authors. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go new file mode 100644 index 0000000000..82ad7bc8f1 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go @@ -0,0 +1,1344 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. 
+// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "bytes" + "errors" + "fmt" + "html" + "math" + "net/url" + "regexp" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Operation defines the operation of a diff item. +type Operation int8 + +const ( + // DiffDelete item represents a delete diff. + DiffDelete Operation = -1 + // DiffInsert item represents an insert diff. + DiffInsert Operation = 1 + // DiffEqual item represents an equal diff. + DiffEqual Operation = 0 +) + +// Diff represents one diff operation +type Diff struct { + Type Operation + Text string +} + +func splice(slice []Diff, index int, amount int, elements ...Diff) []Diff { + return append(slice[:index], append(elements, slice[index+amount:]...)...) +} + +// DiffMain finds the differences between two texts. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +func (dmp *DiffMatchPatch) DiffMain(text1, text2 string, checklines bool) []Diff { + return dmp.DiffMainRunes([]rune(text1), []rune(text2), checklines) +} + +// DiffMainRunes finds the differences between two rune sequences. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +func (dmp *DiffMatchPatch) DiffMainRunes(text1, text2 []rune, checklines bool) []Diff { + var deadline time.Time + if dmp.DiffTimeout > 0 { + deadline = time.Now().Add(dmp.DiffTimeout) + } + return dmp.diffMainRunes(text1, text2, checklines, deadline) +} + +func (dmp *DiffMatchPatch) diffMainRunes(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + if runesEqual(text1, text2) { + var diffs []Diff + if len(text1) > 0 { + diffs = append(diffs, Diff{DiffEqual, string(text1)}) + } + return diffs + } + // Trim off common prefix (speedup). + commonlength := commonPrefixLength(text1, text2) + commonprefix := text1[:commonlength] + text1 = text1[commonlength:] + text2 = text2[commonlength:] + + // Trim off common suffix (speedup). + commonlength = commonSuffixLength(text1, text2) + commonsuffix := text1[len(text1)-commonlength:] + text1 = text1[:len(text1)-commonlength] + text2 = text2[:len(text2)-commonlength] + + // Compute the diff on the middle block. + diffs := dmp.diffCompute(text1, text2, checklines, deadline) + + // Restore the prefix and suffix. + if len(commonprefix) != 0 { + diffs = append([]Diff{Diff{DiffEqual, string(commonprefix)}}, diffs...) + } + if len(commonsuffix) != 0 { + diffs = append(diffs, Diff{DiffEqual, string(commonsuffix)}) + } + + return dmp.DiffCleanupMerge(diffs) +} + +// diffCompute finds the differences between two rune slices. Assumes that the texts do not have any common prefix or suffix. +func (dmp *DiffMatchPatch) diffCompute(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + diffs := []Diff{} + if len(text1) == 0 { + // Just add some text (speedup). + return append(diffs, Diff{DiffInsert, string(text2)}) + } else if len(text2) == 0 { + // Just delete some text (speedup). + return append(diffs, Diff{DiffDelete, string(text1)}) + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if i := runesIndex(longtext, shorttext); i != -1 { + op := DiffInsert + // Swap insertions for deletions if diff is reversed. + if len(text1) > len(text2) { + op = DiffDelete + } + // Shorter text is inside the longer text (speedup). 
+ return []Diff{ + Diff{op, string(longtext[:i])}, + Diff{DiffEqual, string(shorttext)}, + Diff{op, string(longtext[i+len(shorttext):])}, + } + } else if len(shorttext) == 1 { + // Single character string. + // After the previous speedup, the character can't be an equality. + return []Diff{ + Diff{DiffDelete, string(text1)}, + Diff{DiffInsert, string(text2)}, + } + // Check to see if the problem can be split in two. + } else if hm := dmp.diffHalfMatch(text1, text2); hm != nil { + // A half-match was found, sort out the return data. + text1A := hm[0] + text1B := hm[1] + text2A := hm[2] + text2B := hm[3] + midCommon := hm[4] + // Send both pairs off for separate processing. + diffsA := dmp.diffMainRunes(text1A, text2A, checklines, deadline) + diffsB := dmp.diffMainRunes(text1B, text2B, checklines, deadline) + // Merge the results. + return append(diffsA, append([]Diff{Diff{DiffEqual, string(midCommon)}}, diffsB...)...) + } else if checklines && len(text1) > 100 && len(text2) > 100 { + return dmp.diffLineMode(text1, text2, deadline) + } + return dmp.diffBisect(text1, text2, deadline) +} + +// diffLineMode does a quick line-level diff on both []runes, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs. +func (dmp *DiffMatchPatch) diffLineMode(text1, text2 []rune, deadline time.Time) []Diff { + // Scan the text on a line-by-line basis first. + text1, text2, linearray := dmp.diffLinesToRunes(text1, text2) + + diffs := dmp.diffMainRunes(text1, text2, false, deadline) + + // Convert the diff back to original text. + diffs = dmp.DiffCharsToLines(diffs, linearray) + // Eliminate freak matches (e.g. blank lines) + diffs = dmp.DiffCleanupSemantic(diffs) + + // Rediff any replacement blocks, this time character-by-character. + // Add a dummy entry at the end. + diffs = append(diffs, Diff{DiffEqual, ""}) + + pointer := 0 + countDelete := 0 + countInsert := 0 + + // NOTE: Rune slices are slower than using strings in this case. + textDelete := "" + textInsert := "" + + for pointer < len(diffs) { + switch diffs[pointer].Type { + case DiffInsert: + countInsert++ + textInsert += diffs[pointer].Text + case DiffDelete: + countDelete++ + textDelete += diffs[pointer].Text + case DiffEqual: + // Upon reaching an equality, check for prior redundancies. + if countDelete >= 1 && countInsert >= 1 { + // Delete the offending records and add the merged ones. + diffs = splice(diffs, pointer-countDelete-countInsert, + countDelete+countInsert) + + pointer = pointer - countDelete - countInsert + a := dmp.diffMainRunes([]rune(textDelete), []rune(textInsert), false, deadline) + for j := len(a) - 1; j >= 0; j-- { + diffs = splice(diffs, pointer, 0, a[j]) + } + pointer = pointer + len(a) + } + + countInsert = 0 + countDelete = 0 + textDelete = "" + textInsert = "" + } + pointer++ + } + + return diffs[:len(diffs)-1] // Remove the dummy entry at the end. +} + +// DiffBisect finds the 'middle snake' of a diff, split the problem in two and return the recursively constructed diff. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +// See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations. +func (dmp *DiffMatchPatch) DiffBisect(text1, text2 string, deadline time.Time) []Diff { + // Unused in this code, but retained for interface compatibility. 
+ return dmp.diffBisect([]rune(text1), []rune(text2), deadline) +} + +// diffBisect finds the 'middle snake' of a diff, splits the problem in two and returns the recursively constructed diff. +// See Myers's 1986 paper: An O(ND) Difference Algorithm and Its Variations. +func (dmp *DiffMatchPatch) diffBisect(runes1, runes2 []rune, deadline time.Time) []Diff { + // Cache the text lengths to prevent multiple calls. + runes1Len, runes2Len := len(runes1), len(runes2) + + maxD := (runes1Len + runes2Len + 1) / 2 + vOffset := maxD + vLength := 2 * maxD + + v1 := make([]int, vLength) + v2 := make([]int, vLength) + for i := range v1 { + v1[i] = -1 + v2[i] = -1 + } + v1[vOffset+1] = 0 + v2[vOffset+1] = 0 + + delta := runes1Len - runes2Len + // If the total number of characters is odd, then the front path will collide with the reverse path. + front := (delta%2 != 0) + // Offsets for start and end of k loop. Prevents mapping of space beyond the grid. + k1start := 0 + k1end := 0 + k2start := 0 + k2end := 0 + for d := 0; d < maxD; d++ { + // Bail out if deadline is reached. + if !deadline.IsZero() && time.Now().After(deadline) { + break + } + + // Walk the front path one step. + for k1 := -d + k1start; k1 <= d-k1end; k1 += 2 { + k1Offset := vOffset + k1 + var x1 int + + if k1 == -d || (k1 != d && v1[k1Offset-1] < v1[k1Offset+1]) { + x1 = v1[k1Offset+1] + } else { + x1 = v1[k1Offset-1] + 1 + } + + y1 := x1 - k1 + for x1 < runes1Len && y1 < runes2Len { + if runes1[x1] != runes2[y1] { + break + } + x1++ + y1++ + } + v1[k1Offset] = x1 + if x1 > runes1Len { + // Ran off the right of the graph. + k1end += 2 + } else if y1 > runes2Len { + // Ran off the bottom of the graph. + k1start += 2 + } else if front { + k2Offset := vOffset + delta - k1 + if k2Offset >= 0 && k2Offset < vLength && v2[k2Offset] != -1 { + // Mirror x2 onto top-left coordinate system. + x2 := runes1Len - v2[k2Offset] + if x1 >= x2 { + // Overlap detected. + return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) + } + } + } + } + // Walk the reverse path one step. + for k2 := -d + k2start; k2 <= d-k2end; k2 += 2 { + k2Offset := vOffset + k2 + var x2 int + if k2 == -d || (k2 != d && v2[k2Offset-1] < v2[k2Offset+1]) { + x2 = v2[k2Offset+1] + } else { + x2 = v2[k2Offset-1] + 1 + } + var y2 = x2 - k2 + for x2 < runes1Len && y2 < runes2Len { + if runes1[runes1Len-x2-1] != runes2[runes2Len-y2-1] { + break + } + x2++ + y2++ + } + v2[k2Offset] = x2 + if x2 > runes1Len { + // Ran off the left of the graph. + k2end += 2 + } else if y2 > runes2Len { + // Ran off the top of the graph. + k2start += 2 + } else if !front { + k1Offset := vOffset + delta - k2 + if k1Offset >= 0 && k1Offset < vLength && v1[k1Offset] != -1 { + x1 := v1[k1Offset] + y1 := vOffset + x1 - k1Offset + // Mirror x2 onto top-left coordinate system. + x2 = runes1Len - x2 + if x1 >= x2 { + // Overlap detected. + return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) + } + } + } + } + } + // Diff took too long and hit the deadline or number of diffs equals number of characters, no commonality at all. + return []Diff{ + Diff{DiffDelete, string(runes1)}, + Diff{DiffInsert, string(runes2)}, + } +} + +func (dmp *DiffMatchPatch) diffBisectSplit(runes1, runes2 []rune, x, y int, + deadline time.Time) []Diff { + runes1a := runes1[:x] + runes2a := runes2[:y] + runes1b := runes1[x:] + runes2b := runes2[y:] + + // Compute both diffs serially. 
+ diffs := dmp.diffMainRunes(runes1a, runes2a, false, deadline) + diffsb := dmp.diffMainRunes(runes1b, runes2b, false, deadline) + + return append(diffs, diffsb...) +} + +// DiffLinesToChars splits two texts into a list of strings, and educes the texts to a string of hashes where each Unicode character represents one line. +// It's slightly faster to call DiffLinesToRunes first, followed by DiffMainRunes. +func (dmp *DiffMatchPatch) DiffLinesToChars(text1, text2 string) (string, string, []string) { + chars1, chars2, lineArray := dmp.DiffLinesToRunes(text1, text2) + return string(chars1), string(chars2), lineArray +} + +// DiffLinesToRunes splits two texts into a list of runes. Each rune represents one line. +func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune, []string) { + // '\x00' is a valid character, but various debuggers don't like it. So we'll insert a junk entry to avoid generating a null character. + lineArray := []string{""} // e.g. lineArray[4] == 'Hello\n' + lineHash := map[string]int{} // e.g. lineHash['Hello\n'] == 4 + + chars1 := dmp.diffLinesToRunesMunge(text1, &lineArray, lineHash) + chars2 := dmp.diffLinesToRunesMunge(text2, &lineArray, lineHash) + + return chars1, chars2, lineArray +} + +func (dmp *DiffMatchPatch) diffLinesToRunes(text1, text2 []rune) ([]rune, []rune, []string) { + return dmp.DiffLinesToRunes(string(text1), string(text2)) +} + +// diffLinesToRunesMunge splits a text into an array of strings, and reduces the texts to a []rune where each Unicode character represents one line. +// We use strings instead of []runes as input mainly because you can't use []rune as a map key. +func (dmp *DiffMatchPatch) diffLinesToRunesMunge(text string, lineArray *[]string, lineHash map[string]int) []rune { + // Walk the text, pulling out a substring for each line. text.split('\n') would would temporarily double our memory footprint. Modifying text would create many large strings to garbage collect. + lineStart := 0 + lineEnd := -1 + runes := []rune{} + + for lineEnd < len(text)-1 { + lineEnd = indexOf(text, "\n", lineStart) + + if lineEnd == -1 { + lineEnd = len(text) - 1 + } + + line := text[lineStart : lineEnd+1] + lineStart = lineEnd + 1 + lineValue, ok := lineHash[line] + + if ok { + runes = append(runes, rune(lineValue)) + } else { + *lineArray = append(*lineArray, line) + lineHash[line] = len(*lineArray) - 1 + runes = append(runes, rune(len(*lineArray)-1)) + } + } + + return runes +} + +// DiffCharsToLines rehydrates the text in a diff from a string of line hashes to real lines of text. +func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff { + hydrated := make([]Diff, 0, len(diffs)) + for _, aDiff := range diffs { + chars := aDiff.Text + text := make([]string, len(chars)) + + for i, r := range chars { + text[i] = lineArray[r] + } + + aDiff.Text = strings.Join(text, "") + hydrated = append(hydrated, aDiff) + } + return hydrated +} + +// DiffCommonPrefix determines the common prefix length of two strings. +func (dmp *DiffMatchPatch) DiffCommonPrefix(text1, text2 string) int { + // Unused in this code, but retained for interface compatibility. + return commonPrefixLength([]rune(text1), []rune(text2)) +} + +// DiffCommonSuffix determines the common suffix length of two strings. +func (dmp *DiffMatchPatch) DiffCommonSuffix(text1, text2 string) int { + // Unused in this code, but retained for interface compatibility. 
+ return commonSuffixLength([]rune(text1), []rune(text2)) +} + +// commonPrefixLength returns the length of the common prefix of two rune slices. +func commonPrefixLength(text1, text2 []rune) int { + short, long := text1, text2 + if len(short) > len(long) { + short, long = long, short + } + for i, r := range short { + if r != long[i] { + return i + } + } + return len(short) +} + +// commonSuffixLength returns the length of the common suffix of two rune slices. +func commonSuffixLength(text1, text2 []rune) int { + n := min(len(text1), len(text2)) + for i := 0; i < n; i++ { + if text1[len(text1)-i-1] != text2[len(text2)-i-1] { + return i + } + } + return n + + // TODO research and benchmark this, why is it not activated? https://github.com/sergi/go-diff/issues/54 + // Binary search. + // Performance analysis: http://neil.fraser.name/news/2007/10/09/ + /* + pointermin := 0 + pointermax := math.Min(len(text1), len(text2)) + pointermid := pointermax + pointerend := 0 + for pointermin < pointermid { + if text1[len(text1)-pointermid:len(text1)-pointerend] == + text2[len(text2)-pointermid:len(text2)-pointerend] { + pointermin = pointermid + pointerend = pointermin + } else { + pointermax = pointermid + } + pointermid = math.Floor((pointermax-pointermin)/2 + pointermin) + } + return pointermid + */ +} + +// DiffCommonOverlap determines if the suffix of one string is the prefix of another. +func (dmp *DiffMatchPatch) DiffCommonOverlap(text1 string, text2 string) int { + // Cache the text lengths to prevent multiple calls. + text1Length := len(text1) + text2Length := len(text2) + // Eliminate the null case. + if text1Length == 0 || text2Length == 0 { + return 0 + } + // Truncate the longer string. + if text1Length > text2Length { + text1 = text1[text1Length-text2Length:] + } else if text1Length < text2Length { + text2 = text2[0:text1Length] + } + textLength := int(math.Min(float64(text1Length), float64(text2Length))) + // Quick check for the worst case. + if text1 == text2 { + return textLength + } + + // Start by looking for a single character match and increase length until no match is found. Performance analysis: http://neil.fraser.name/news/2010/11/04/ + best := 0 + length := 1 + for { + pattern := text1[textLength-length:] + found := strings.Index(text2, pattern) + if found == -1 { + break + } + length += found + if found == 0 || text1[textLength-length:] == text2[0:length] { + best = length + length++ + } + } + + return best +} + +// DiffHalfMatch checks whether the two texts share a substring which is at least half the length of the longer text. This speedup can produce non-minimal diffs. +func (dmp *DiffMatchPatch) DiffHalfMatch(text1, text2 string) []string { + // Unused in this code, but retained for interface compatibility. + runeSlices := dmp.diffHalfMatch([]rune(text1), []rune(text2)) + if runeSlices == nil { + return nil + } + + result := make([]string, len(runeSlices)) + for i, r := range runeSlices { + result[i] = string(r) + } + return result +} + +func (dmp *DiffMatchPatch) diffHalfMatch(text1, text2 []rune) [][]rune { + if dmp.DiffTimeout <= 0 { + // Don't risk returning a non-optimal diff if we have unlimited time. + return nil + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if len(longtext) < 4 || len(shorttext)*2 < len(longtext) { + return nil // Pointless. + } + + // First check if the second quarter is the seed for a half-match. 
+ hm1 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+3)/4)) + + // Check again based on the third quarter. + hm2 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+1)/2)) + + hm := [][]rune{} + if hm1 == nil && hm2 == nil { + return nil + } else if hm2 == nil { + hm = hm1 + } else if hm1 == nil { + hm = hm2 + } else { + // Both matched. Select the longest. + if len(hm1[4]) > len(hm2[4]) { + hm = hm1 + } else { + hm = hm2 + } + } + + // A half-match was found, sort out the return data. + if len(text1) > len(text2) { + return hm + } + + return [][]rune{hm[2], hm[3], hm[0], hm[1], hm[4]} +} + +// diffHalfMatchI checks if a substring of shorttext exist within longtext such that the substring is at least half the length of longtext? +// Returns a slice containing the prefix of longtext, the suffix of longtext, the prefix of shorttext, the suffix of shorttext and the common middle, or null if there was no match. +func (dmp *DiffMatchPatch) diffHalfMatchI(l, s []rune, i int) [][]rune { + var bestCommonA []rune + var bestCommonB []rune + var bestCommonLen int + var bestLongtextA []rune + var bestLongtextB []rune + var bestShorttextA []rune + var bestShorttextB []rune + + // Start with a 1/4 length substring at position i as a seed. + seed := l[i : i+len(l)/4] + + for j := runesIndexOf(s, seed, 0); j != -1; j = runesIndexOf(s, seed, j+1) { + prefixLength := commonPrefixLength(l[i:], s[j:]) + suffixLength := commonSuffixLength(l[:i], s[:j]) + + if bestCommonLen < suffixLength+prefixLength { + bestCommonA = s[j-suffixLength : j] + bestCommonB = s[j : j+prefixLength] + bestCommonLen = len(bestCommonA) + len(bestCommonB) + bestLongtextA = l[:i-suffixLength] + bestLongtextB = l[i+prefixLength:] + bestShorttextA = s[:j-suffixLength] + bestShorttextB = s[j+prefixLength:] + } + } + + if bestCommonLen*2 < len(l) { + return nil + } + + return [][]rune{ + bestLongtextA, + bestLongtextB, + bestShorttextA, + bestShorttextB, + append(bestCommonA, bestCommonB...), + } +} + +// DiffCleanupSemantic reduces the number of edits by eliminating semantically trivial equalities. +func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff { + changes := false + // Stack of indices where equalities are found. + type equality struct { + data int + next *equality + } + var equalities *equality + + var lastequality string + // Always equal to diffs[equalities[equalitiesLength - 1]][1] + var pointer int // Index of current position. + // Number of characters that changed prior to the equality. + var lengthInsertions1, lengthDeletions1 int + // Number of characters that changed after the equality. + var lengthInsertions2, lengthDeletions2 int + + for pointer < len(diffs) { + if diffs[pointer].Type == DiffEqual { + // Equality found. + + equalities = &equality{ + data: pointer, + next: equalities, + } + lengthInsertions1 = lengthInsertions2 + lengthDeletions1 = lengthDeletions2 + lengthInsertions2 = 0 + lengthDeletions2 = 0 + lastequality = diffs[pointer].Text + } else { + // An insertion or deletion. + + if diffs[pointer].Type == DiffInsert { + lengthInsertions2 += len(diffs[pointer].Text) + } else { + lengthDeletions2 += len(diffs[pointer].Text) + } + // Eliminate an equality that is smaller or equal to the edits on both sides of it. 
+ difference1 := int(math.Max(float64(lengthInsertions1), float64(lengthDeletions1))) + difference2 := int(math.Max(float64(lengthInsertions2), float64(lengthDeletions2))) + if len(lastequality) > 0 && + (len(lastequality) <= difference1) && + (len(lastequality) <= difference2) { + // Duplicate record. + insPoint := equalities.data + diffs = append( + diffs[:insPoint], + append([]Diff{Diff{DiffDelete, lastequality}}, diffs[insPoint:]...)...) + + // Change second copy to insert. + diffs[insPoint+1].Type = DiffInsert + // Throw away the equality we just deleted. + equalities = equalities.next + + if equalities != nil { + equalities = equalities.next + } + if equalities != nil { + pointer = equalities.data + } else { + pointer = -1 + } + + lengthInsertions1 = 0 // Reset the counters. + lengthDeletions1 = 0 + lengthInsertions2 = 0 + lengthDeletions2 = 0 + lastequality = "" + changes = true + } + } + pointer++ + } + + // Normalize the diff. + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + diffs = dmp.DiffCleanupSemanticLossless(diffs) + // Find any overlaps between deletions and insertions. + // e.g: abcxxxxxxdef + // -> abcxxxdef + // e.g: xxxabcdefxxx + // -> defxxxabc + // Only extract an overlap if it is as big as the edit ahead or behind it. + pointer = 1 + for pointer < len(diffs) { + if diffs[pointer-1].Type == DiffDelete && + diffs[pointer].Type == DiffInsert { + deletion := diffs[pointer-1].Text + insertion := diffs[pointer].Text + overlapLength1 := dmp.DiffCommonOverlap(deletion, insertion) + overlapLength2 := dmp.DiffCommonOverlap(insertion, deletion) + if overlapLength1 >= overlapLength2 { + if float64(overlapLength1) >= float64(len(deletion))/2 || + float64(overlapLength1) >= float64(len(insertion))/2 { + + // Overlap found. Insert an equality and trim the surrounding edits. + diffs = append( + diffs[:pointer], + append([]Diff{Diff{DiffEqual, insertion[:overlapLength1]}}, diffs[pointer:]...)...) + + diffs[pointer-1].Text = + deletion[0 : len(deletion)-overlapLength1] + diffs[pointer+1].Text = insertion[overlapLength1:] + pointer++ + } + } else { + if float64(overlapLength2) >= float64(len(deletion))/2 || + float64(overlapLength2) >= float64(len(insertion))/2 { + // Reverse overlap found. Insert an equality and swap and trim the surrounding edits. + overlap := Diff{DiffEqual, deletion[:overlapLength2]} + diffs = append( + diffs[:pointer], + append([]Diff{overlap}, diffs[pointer:]...)...) + + diffs[pointer-1].Type = DiffInsert + diffs[pointer-1].Text = insertion[0 : len(insertion)-overlapLength2] + diffs[pointer+1].Type = DiffDelete + diffs[pointer+1].Text = deletion[overlapLength2:] + pointer++ + } + } + pointer++ + } + pointer++ + } + + return diffs +} + +// Define some regex patterns for matching boundaries. +var ( + nonAlphaNumericRegex = regexp.MustCompile(`[^a-zA-Z0-9]`) + whitespaceRegex = regexp.MustCompile(`\s`) + linebreakRegex = regexp.MustCompile(`[\r\n]`) + blanklineEndRegex = regexp.MustCompile(`\n\r?\n$`) + blanklineStartRegex = regexp.MustCompile(`^\r?\n\r?\n`) +) + +// diffCleanupSemanticScore computes a score representing whether the internal boundary falls on logical boundaries. +// Scores range from 6 (best) to 0 (worst). Closure, but does not reference any external variables. +func diffCleanupSemanticScore(one, two string) int { + if len(one) == 0 || len(two) == 0 { + // Edges are the best. + return 6 + } + + // Each port of this function behaves slightly differently due to subtle differences in each language's definition of things like 'whitespace'. 
Since this function's purpose is largely cosmetic, the choice has been made to use each language's native features rather than force total conformity. + rune1, _ := utf8.DecodeLastRuneInString(one) + rune2, _ := utf8.DecodeRuneInString(two) + char1 := string(rune1) + char2 := string(rune2) + + nonAlphaNumeric1 := nonAlphaNumericRegex.MatchString(char1) + nonAlphaNumeric2 := nonAlphaNumericRegex.MatchString(char2) + whitespace1 := nonAlphaNumeric1 && whitespaceRegex.MatchString(char1) + whitespace2 := nonAlphaNumeric2 && whitespaceRegex.MatchString(char2) + lineBreak1 := whitespace1 && linebreakRegex.MatchString(char1) + lineBreak2 := whitespace2 && linebreakRegex.MatchString(char2) + blankLine1 := lineBreak1 && blanklineEndRegex.MatchString(one) + blankLine2 := lineBreak2 && blanklineEndRegex.MatchString(two) + + if blankLine1 || blankLine2 { + // Five points for blank lines. + return 5 + } else if lineBreak1 || lineBreak2 { + // Four points for line breaks. + return 4 + } else if nonAlphaNumeric1 && !whitespace1 && whitespace2 { + // Three points for end of sentences. + return 3 + } else if whitespace1 || whitespace2 { + // Two points for whitespace. + return 2 + } else if nonAlphaNumeric1 || nonAlphaNumeric2 { + // One point for non-alphanumeric. + return 1 + } + return 0 +} + +// DiffCleanupSemanticLossless looks for single edits surrounded on both sides by equalities which can be shifted sideways to align the edit to a word boundary. +// E.g: The cat came. -> The cat came. +func (dmp *DiffMatchPatch) DiffCleanupSemanticLossless(diffs []Diff) []Diff { + pointer := 1 + + // Intentionally ignore the first and last element (don't need checking). + for pointer < len(diffs)-1 { + if diffs[pointer-1].Type == DiffEqual && + diffs[pointer+1].Type == DiffEqual { + + // This is a single edit surrounded by equalities. + equality1 := diffs[pointer-1].Text + edit := diffs[pointer].Text + equality2 := diffs[pointer+1].Text + + // First, shift the edit as far left as possible. + commonOffset := dmp.DiffCommonSuffix(equality1, edit) + if commonOffset > 0 { + commonString := edit[len(edit)-commonOffset:] + equality1 = equality1[0 : len(equality1)-commonOffset] + edit = commonString + edit[:len(edit)-commonOffset] + equality2 = commonString + equality2 + } + + // Second, step character by character right, looking for the best fit. + bestEquality1 := equality1 + bestEdit := edit + bestEquality2 := equality2 + bestScore := diffCleanupSemanticScore(equality1, edit) + + diffCleanupSemanticScore(edit, equality2) + + for len(edit) != 0 && len(equality2) != 0 { + _, sz := utf8.DecodeRuneInString(edit) + if len(equality2) < sz || edit[:sz] != equality2[:sz] { + break + } + equality1 += edit[:sz] + edit = edit[sz:] + equality2[:sz] + equality2 = equality2[sz:] + score := diffCleanupSemanticScore(equality1, edit) + + diffCleanupSemanticScore(edit, equality2) + // The >= encourages trailing rather than leading whitespace on edits. + if score >= bestScore { + bestScore = score + bestEquality1 = equality1 + bestEdit = edit + bestEquality2 = equality2 + } + } + + if diffs[pointer-1].Text != bestEquality1 { + // We have an improvement, save it back to the diff. + if len(bestEquality1) != 0 { + diffs[pointer-1].Text = bestEquality1 + } else { + diffs = splice(diffs, pointer-1, 1) + pointer-- + } + + diffs[pointer].Text = bestEdit + if len(bestEquality2) != 0 { + diffs[pointer+1].Text = bestEquality2 + } else { + diffs = append(diffs[:pointer+1], diffs[pointer+2:]...) 
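DiffCleanupSemanticLossless, defined above, slides a single edit left or right between its neighbouring equalities until diffCleanupSemanticScore is maximised, which tends to align edits with word and line boundaries. A small usage example against the public API; the result described in the comment is the behaviour expected from the upstream diff-match-patch test suite.

package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	diffs := []diffmatchpatch.Diff{
		{Type: diffmatchpatch.DiffEqual, Text: "The c"},
		{Type: diffmatchpatch.DiffInsert, Text: "ow and the c"},
		{Type: diffmatchpatch.DiffEqual, Text: "at."},
	}
	// The insertion is shifted onto word boundaries:
	// ="The " / +"cow and the " / ="cat."
	for _, d := range dmp.DiffCleanupSemanticLossless(diffs) {
		fmt.Printf("%v %q\n", d.Type, d.Text)
	}
}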
+ pointer-- + } + } + } + pointer++ + } + + return diffs +} + +// DiffCleanupEfficiency reduces the number of edits by eliminating operationally trivial equalities. +func (dmp *DiffMatchPatch) DiffCleanupEfficiency(diffs []Diff) []Diff { + changes := false + // Stack of indices where equalities are found. + type equality struct { + data int + next *equality + } + var equalities *equality + // Always equal to equalities[equalitiesLength-1][1] + lastequality := "" + pointer := 0 // Index of current position. + // Is there an insertion operation before the last equality. + preIns := false + // Is there a deletion operation before the last equality. + preDel := false + // Is there an insertion operation after the last equality. + postIns := false + // Is there a deletion operation after the last equality. + postDel := false + for pointer < len(diffs) { + if diffs[pointer].Type == DiffEqual { // Equality found. + if len(diffs[pointer].Text) < dmp.DiffEditCost && + (postIns || postDel) { + // Candidate found. + equalities = &equality{ + data: pointer, + next: equalities, + } + preIns = postIns + preDel = postDel + lastequality = diffs[pointer].Text + } else { + // Not a candidate, and can never become one. + equalities = nil + lastequality = "" + } + postIns = false + postDel = false + } else { // An insertion or deletion. + if diffs[pointer].Type == DiffDelete { + postDel = true + } else { + postIns = true + } + + // Five types to be split: + // ABXYCD + // AXCD + // ABXC + // AXCD + // ABXC + var sumPres int + if preIns { + sumPres++ + } + if preDel { + sumPres++ + } + if postIns { + sumPres++ + } + if postDel { + sumPres++ + } + if len(lastequality) > 0 && + ((preIns && preDel && postIns && postDel) || + ((len(lastequality) < dmp.DiffEditCost/2) && sumPres == 3)) { + + insPoint := equalities.data + + // Duplicate record. + diffs = append(diffs[:insPoint], + append([]Diff{Diff{DiffDelete, lastequality}}, diffs[insPoint:]...)...) + + // Change second copy to insert. + diffs[insPoint+1].Type = DiffInsert + // Throw away the equality we just deleted. + equalities = equalities.next + lastequality = "" + + if preIns && preDel { + // No changes made which could affect previous entry, keep going. + postIns = true + postDel = true + equalities = nil + } else { + if equalities != nil { + equalities = equalities.next + } + if equalities != nil { + pointer = equalities.data + } else { + pointer = -1 + } + postIns = false + postDel = false + } + changes = true + } + } + pointer++ + } + + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + + return diffs +} + +// DiffCleanupMerge reorders and merges like edit sections. Merge equalities. +// Any edit section can move as long as it doesn't cross an equality. +func (dmp *DiffMatchPatch) DiffCleanupMerge(diffs []Diff) []Diff { + // Add a dummy entry at the end. + diffs = append(diffs, Diff{DiffEqual, ""}) + pointer := 0 + countDelete := 0 + countInsert := 0 + commonlength := 0 + textDelete := []rune(nil) + textInsert := []rune(nil) + + for pointer < len(diffs) { + switch diffs[pointer].Type { + case DiffInsert: + countInsert++ + textInsert = append(textInsert, []rune(diffs[pointer].Text)...) + pointer++ + break + case DiffDelete: + countDelete++ + textDelete = append(textDelete, []rune(diffs[pointer].Text)...) + pointer++ + break + case DiffEqual: + // Upon reaching an equality, check for prior redundancies. + if countDelete+countInsert > 1 { + if countDelete != 0 && countInsert != 0 { + // Factor out any common prefixies. 
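DiffCleanupEfficiency, above, trades diff precision for a shorter edit script: an equality shorter than DiffEditCost that is wedged between insertions and deletions is cheaper to fold into the surrounding edits than to keep. A sketch of how that plays out with the default DiffEditCost of 4; the expected result in the comment follows the upstream test suite.

package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New() // DiffEditCost defaults to 4
	diffs := []diffmatchpatch.Diff{
		{Type: diffmatchpatch.DiffDelete, Text: "ab"},
		{Type: diffmatchpatch.DiffInsert, Text: "12"},
		{Type: diffmatchpatch.DiffEqual, Text: "xyz"},
		{Type: diffmatchpatch.DiffDelete, Text: "cd"},
		{Type: diffmatchpatch.DiffInsert, Text: "34"},
	}
	// "xyz" is shorter than DiffEditCost and surrounded by edits on both
	// sides, so it is folded in: the result is -"abxyzcd" / +"12xyz34".
	for _, d := range dmp.DiffCleanupEfficiency(diffs) {
		fmt.Printf("%v %q\n", d.Type, d.Text)
	}
}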
+ commonlength = commonPrefixLength(textInsert, textDelete) + if commonlength != 0 { + x := pointer - countDelete - countInsert + if x > 0 && diffs[x-1].Type == DiffEqual { + diffs[x-1].Text += string(textInsert[:commonlength]) + } else { + diffs = append([]Diff{Diff{DiffEqual, string(textInsert[:commonlength])}}, diffs...) + pointer++ + } + textInsert = textInsert[commonlength:] + textDelete = textDelete[commonlength:] + } + // Factor out any common suffixies. + commonlength = commonSuffixLength(textInsert, textDelete) + if commonlength != 0 { + insertIndex := len(textInsert) - commonlength + deleteIndex := len(textDelete) - commonlength + diffs[pointer].Text = string(textInsert[insertIndex:]) + diffs[pointer].Text + textInsert = textInsert[:insertIndex] + textDelete = textDelete[:deleteIndex] + } + } + // Delete the offending records and add the merged ones. + if countDelete == 0 { + diffs = splice(diffs, pointer-countInsert, + countDelete+countInsert, + Diff{DiffInsert, string(textInsert)}) + } else if countInsert == 0 { + diffs = splice(diffs, pointer-countDelete, + countDelete+countInsert, + Diff{DiffDelete, string(textDelete)}) + } else { + diffs = splice(diffs, pointer-countDelete-countInsert, + countDelete+countInsert, + Diff{DiffDelete, string(textDelete)}, + Diff{DiffInsert, string(textInsert)}) + } + + pointer = pointer - countDelete - countInsert + 1 + if countDelete != 0 { + pointer++ + } + if countInsert != 0 { + pointer++ + } + } else if pointer != 0 && diffs[pointer-1].Type == DiffEqual { + // Merge this equality with the previous one. + diffs[pointer-1].Text += diffs[pointer].Text + diffs = append(diffs[:pointer], diffs[pointer+1:]...) + } else { + pointer++ + } + countInsert = 0 + countDelete = 0 + textDelete = nil + textInsert = nil + break + } + } + + if len(diffs[len(diffs)-1].Text) == 0 { + diffs = diffs[0 : len(diffs)-1] // Remove the dummy entry at the end. + } + + // Second pass: look for single edits surrounded on both sides by equalities which can be shifted sideways to eliminate an equality. E.g: ABAC -> ABAC + changes := false + pointer = 1 + // Intentionally ignore the first and last element (don't need checking). + for pointer < (len(diffs) - 1) { + if diffs[pointer-1].Type == DiffEqual && + diffs[pointer+1].Type == DiffEqual { + // This is a single edit surrounded by equalities. + if strings.HasSuffix(diffs[pointer].Text, diffs[pointer-1].Text) { + // Shift the edit over the previous equality. + diffs[pointer].Text = diffs[pointer-1].Text + + diffs[pointer].Text[:len(diffs[pointer].Text)-len(diffs[pointer-1].Text)] + diffs[pointer+1].Text = diffs[pointer-1].Text + diffs[pointer+1].Text + diffs = splice(diffs, pointer-1, 1) + changes = true + } else if strings.HasPrefix(diffs[pointer].Text, diffs[pointer+1].Text) { + // Shift the edit over the next equality. + diffs[pointer-1].Text += diffs[pointer+1].Text + diffs[pointer].Text = + diffs[pointer].Text[len(diffs[pointer+1].Text):] + diffs[pointer+1].Text + diffs = splice(diffs, pointer+1, 1) + changes = true + } + } + pointer++ + } + + // If shifts were made, the diff needs reordering and another shift sweep. + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + + return diffs +} + +// DiffXIndex returns the equivalent location in s2. +func (dmp *DiffMatchPatch) DiffXIndex(diffs []Diff, loc int) int { + chars1 := 0 + chars2 := 0 + lastChars1 := 0 + lastChars2 := 0 + lastDiff := Diff{} + for i := 0; i < len(diffs); i++ { + aDiff := diffs[i] + if aDiff.Type != DiffInsert { + // Equality or deletion. 
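DiffCleanupMerge, whose main loop appears above, coalesces runs of same-typed edits and factors common prefixes and suffixes back into the neighbouring equalities. A short example; the expected result in the comment follows the upstream test suite.

package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	diffs := []diffmatchpatch.Diff{
		{Type: diffmatchpatch.DiffDelete, Text: "a"},
		{Type: diffmatchpatch.DiffInsert, Text: "b"},
		{Type: diffmatchpatch.DiffDelete, Text: "c"},
		{Type: diffmatchpatch.DiffInsert, Text: "d"},
		{Type: diffmatchpatch.DiffEqual, Text: "e"},
		{Type: diffmatchpatch.DiffEqual, Text: "f"},
	}
	// Interleaved edits are gathered and the trailing equalities merged:
	// -"ac" / +"bd" / ="ef"
	for _, d := range dmp.DiffCleanupMerge(diffs) {
		fmt.Printf("%v %q\n", d.Type, d.Text)
	}
}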
+ chars1 += len(aDiff.Text) + } + if aDiff.Type != DiffDelete { + // Equality or insertion. + chars2 += len(aDiff.Text) + } + if chars1 > loc { + // Overshot the location. + lastDiff = aDiff + break + } + lastChars1 = chars1 + lastChars2 = chars2 + } + if lastDiff.Type == DiffDelete { + // The location was deleted. + return lastChars2 + } + // Add the remaining character length. + return lastChars2 + (loc - lastChars1) +} + +// DiffPrettyHtml converts a []Diff into a pretty HTML report. +// It is intended as an example from which to write one's own display functions. +func (dmp *DiffMatchPatch) DiffPrettyHtml(diffs []Diff) string { + var buff bytes.Buffer + for _, diff := range diffs { + text := strings.Replace(html.EscapeString(diff.Text), "\n", "¶
", -1) + switch diff.Type { + case DiffInsert: + _, _ = buff.WriteString("") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("") + case DiffDelete: + _, _ = buff.WriteString("") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("") + case DiffEqual: + _, _ = buff.WriteString("") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("") + } + } + return buff.String() +} + +// DiffPrettyText converts a []Diff into a colored text report. +func (dmp *DiffMatchPatch) DiffPrettyText(diffs []Diff) string { + var buff bytes.Buffer + for _, diff := range diffs { + text := diff.Text + + switch diff.Type { + case DiffInsert: + _, _ = buff.WriteString("\x1b[32m") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("\x1b[0m") + case DiffDelete: + _, _ = buff.WriteString("\x1b[31m") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("\x1b[0m") + case DiffEqual: + _, _ = buff.WriteString(text) + } + } + + return buff.String() +} + +// DiffText1 computes and returns the source text (all equalities and deletions). +func (dmp *DiffMatchPatch) DiffText1(diffs []Diff) string { + //StringBuilder text = new StringBuilder() + var text bytes.Buffer + + for _, aDiff := range diffs { + if aDiff.Type != DiffInsert { + _, _ = text.WriteString(aDiff.Text) + } + } + return text.String() +} + +// DiffText2 computes and returns the destination text (all equalities and insertions). +func (dmp *DiffMatchPatch) DiffText2(diffs []Diff) string { + var text bytes.Buffer + + for _, aDiff := range diffs { + if aDiff.Type != DiffDelete { + _, _ = text.WriteString(aDiff.Text) + } + } + return text.String() +} + +// DiffLevenshtein computes the Levenshtein distance that is the number of inserted, deleted or substituted characters. +func (dmp *DiffMatchPatch) DiffLevenshtein(diffs []Diff) int { + levenshtein := 0 + insertions := 0 + deletions := 0 + + for _, aDiff := range diffs { + switch aDiff.Type { + case DiffInsert: + insertions += len(aDiff.Text) + case DiffDelete: + deletions += len(aDiff.Text) + case DiffEqual: + // A deletion and an insertion is one substitution. + levenshtein += max(insertions, deletions) + insertions = 0 + deletions = 0 + } + } + + levenshtein += max(insertions, deletions) + return levenshtein +} + +// DiffToDelta crushes the diff into an encoded string which describes the operations required to transform text1 into text2. +// E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'. Operations are tab-separated. Inserted text is escaped using %xx notation. +func (dmp *DiffMatchPatch) DiffToDelta(diffs []Diff) string { + var text bytes.Buffer + for _, aDiff := range diffs { + switch aDiff.Type { + case DiffInsert: + _, _ = text.WriteString("+") + _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) + _, _ = text.WriteString("\t") + break + case DiffDelete: + _, _ = text.WriteString("-") + _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text))) + _, _ = text.WriteString("\t") + break + case DiffEqual: + _, _ = text.WriteString("=") + _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text))) + _, _ = text.WriteString("\t") + break + } + } + delta := text.String() + if len(delta) != 0 { + // Strip off trailing tab character. + delta = delta[0 : utf8.RuneCountInString(delta)-1] + delta = unescaper.Replace(delta) + } + return delta +} + +// DiffFromDelta given the original text1, and an encoded string which describes the operations required to transform text1 into text2, comAdde the full diff. 
+func (dmp *DiffMatchPatch) DiffFromDelta(text1 string, delta string) (diffs []Diff, err error) { + i := 0 + runes := []rune(text1) + + for _, token := range strings.Split(delta, "\t") { + if len(token) == 0 { + // Blank tokens are ok (from a trailing \t). + continue + } + + // Each token begins with a one character parameter which specifies the operation of this token (delete, insert, equality). + param := token[1:] + + switch op := token[0]; op { + case '+': + // Decode would Diff all "+" to " " + param = strings.Replace(param, "+", "%2b", -1) + param, err = url.QueryUnescape(param) + if err != nil { + return nil, err + } + if !utf8.ValidString(param) { + return nil, fmt.Errorf("invalid UTF-8 token: %q", param) + } + + diffs = append(diffs, Diff{DiffInsert, param}) + case '=', '-': + n, err := strconv.ParseInt(param, 10, 0) + if err != nil { + return nil, err + } else if n < 0 { + return nil, errors.New("Negative number in DiffFromDelta: " + param) + } + + i += int(n) + // Break out if we are out of bounds, go1.6 can't handle this very well + if i > len(runes) { + break + } + // Remember that string slicing is by byte - we want by rune here. + text := string(runes[i-int(n) : i]) + + if op == '=' { + diffs = append(diffs, Diff{DiffEqual, text}) + } else { + diffs = append(diffs, Diff{DiffDelete, text}) + } + default: + // Anything else is an error. + return nil, errors.New("Invalid diff operation in DiffFromDelta: " + string(token[0])) + } + } + + if i != len(runes) { + return nil, fmt.Errorf("Delta length (%v) is different from source text length (%v)", i, len(text1)) + } + + return diffs, nil +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go new file mode 100644 index 0000000000..d3acc32ce1 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go @@ -0,0 +1,46 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +// Package diffmatchpatch offers robust algorithms to perform the operations required for synchronizing plain text. +package diffmatchpatch + +import ( + "time" +) + +// DiffMatchPatch holds the configuration for diff-match-patch operations. +type DiffMatchPatch struct { + // Number of seconds to map a diff before giving up (0 for infinity). + DiffTimeout time.Duration + // Cost of an empty edit operation in terms of edit characters. + DiffEditCost int + // How far to search for a match (0 = exact location, 1000+ = broad match). A match this many characters away from the expected location will add 1.0 to the score (0.0 is a perfect match). + MatchDistance int + // When deleting a large block of text (over ~64 characters), how close do the contents have to be to match the expected contents. (0.0 = perfection, 1.0 = very loose). Note that MatchThreshold controls how closely the end points of a delete need to match. + PatchDeleteThreshold float64 + // Chunk size for context length. + PatchMargin int + // The number of bits in an int. + MatchMaxBits int + // At what point is no match declared (0.0 = perfection, 1.0 = very loose). + MatchThreshold float64 +} + +// New creates a new DiffMatchPatch object with default parameters. 
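DiffToDelta and DiffFromDelta, above, give a compact wire format for shipping a diff when the receiver already has text1. A minimal round trip; the exact delta string depends on how DiffMain splits the change, so it is not spelled out here.

package main

import (
	"fmt"
	"log"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	text1 := "The quick brown fox"
	diffs := dmp.DiffMain(text1, "The quick red fox", false)

	// delta is a compact, tab-separated edit script: "=N" keep, "-N" delete,
	// "+text" insert (with %xx escaping).
	delta := dmp.DiffToDelta(diffs)

	// Given text1 and the delta, the receiving side rebuilds the full diff.
	restored, err := dmp.DiffFromDelta(text1, delta)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(dmp.DiffText2(restored)) // "The quick red fox"
}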
+func New() *DiffMatchPatch { + // Defaults. + return &DiffMatchPatch{ + DiffTimeout: time.Second, + DiffEditCost: 4, + MatchThreshold: 0.5, + MatchDistance: 1000, + PatchDeleteThreshold: 0.5, + PatchMargin: 4, + MatchMaxBits: 32, + } +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go new file mode 100644 index 0000000000..17374e109f --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go @@ -0,0 +1,160 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "math" +) + +// MatchMain locates the best instance of 'pattern' in 'text' near 'loc'. +// Returns -1 if no match found. +func (dmp *DiffMatchPatch) MatchMain(text, pattern string, loc int) int { + // Check for null inputs not needed since null can't be passed in C#. + + loc = int(math.Max(0, math.Min(float64(loc), float64(len(text))))) + if text == pattern { + // Shortcut (potentially not guaranteed by the algorithm) + return 0 + } else if len(text) == 0 { + // Nothing to match. + return -1 + } else if loc+len(pattern) <= len(text) && text[loc:loc+len(pattern)] == pattern { + // Perfect match at the perfect spot! (Includes case of null pattern) + return loc + } + // Do a fuzzy compare. + return dmp.MatchBitap(text, pattern, loc) +} + +// MatchBitap locates the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm. +// Returns -1 if no match was found. +func (dmp *DiffMatchPatch) MatchBitap(text, pattern string, loc int) int { + // Initialise the alphabet. + s := dmp.MatchAlphabet(pattern) + + // Highest score beyond which we give up. + scoreThreshold := dmp.MatchThreshold + // Is there a nearby exact match? (speedup) + bestLoc := indexOf(text, pattern, loc) + if bestLoc != -1 { + scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, + pattern), scoreThreshold) + // What about in the other direction? (speedup) + bestLoc = lastIndexOf(text, pattern, loc+len(pattern)) + if bestLoc != -1 { + scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, + pattern), scoreThreshold) + } + } + + // Initialise the bit arrays. + matchmask := 1 << uint((len(pattern) - 1)) + bestLoc = -1 + + var binMin, binMid int + binMax := len(pattern) + len(text) + lastRd := []int{} + for d := 0; d < len(pattern); d++ { + // Scan for the best match; each iteration allows for one more error. Run a binary search to determine how far from 'loc' we can stray at this error level. + binMin = 0 + binMid = binMax + for binMin < binMid { + if dmp.matchBitapScore(d, loc+binMid, loc, pattern) <= scoreThreshold { + binMin = binMid + } else { + binMax = binMid + } + binMid = (binMax-binMin)/2 + binMin + } + // Use the result from this iteration as the maximum for the next. + binMax = binMid + start := int(math.Max(1, float64(loc-binMid+1))) + finish := int(math.Min(float64(loc+binMid), float64(len(text))) + float64(len(pattern))) + + rd := make([]int, finish+2) + rd[finish+1] = (1 << uint(d)) - 1 + + for j := finish; j >= start; j-- { + var charMatch int + if len(text) <= j-1 { + // Out of range. 
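MatchMain falls back to the Bitap search above when there is no exact match at the expected location; MatchThreshold and MatchDistance (set by New) decide how much fuzziness and drift is tolerated. A small usage example.

package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New() // MatchThreshold 0.5, MatchDistance 1000

	text := "abc abcdef abc"
	// Look for "abcdef" near index 1; the search finds the occurrence at 4
	// and only pays a small proximity penalty for the 3-character drift.
	fmt.Println(dmp.MatchMain(text, "abcdef", 1)) // 4
}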
+ charMatch = 0 + } else if _, ok := s[text[j-1]]; !ok { + charMatch = 0 + } else { + charMatch = s[text[j-1]] + } + + if d == 0 { + // First pass: exact match. + rd[j] = ((rd[j+1] << 1) | 1) & charMatch + } else { + // Subsequent passes: fuzzy match. + rd[j] = ((rd[j+1]<<1)|1)&charMatch | (((lastRd[j+1] | lastRd[j]) << 1) | 1) | lastRd[j+1] + } + if (rd[j] & matchmask) != 0 { + score := dmp.matchBitapScore(d, j-1, loc, pattern) + // This match will almost certainly be better than any existing match. But check anyway. + if score <= scoreThreshold { + // Told you so. + scoreThreshold = score + bestLoc = j - 1 + if bestLoc > loc { + // When passing loc, don't exceed our current distance from loc. + start = int(math.Max(1, float64(2*loc-bestLoc))) + } else { + // Already passed loc, downhill from here on in. + break + } + } + } + } + if dmp.matchBitapScore(d+1, loc, loc, pattern) > scoreThreshold { + // No hope for a (better) match at greater error levels. + break + } + lastRd = rd + } + return bestLoc +} + +// matchBitapScore computes and returns the score for a match with e errors and x location. +func (dmp *DiffMatchPatch) matchBitapScore(e, x, loc int, pattern string) float64 { + accuracy := float64(e) / float64(len(pattern)) + proximity := math.Abs(float64(loc - x)) + if dmp.MatchDistance == 0 { + // Dodge divide by zero error. + if proximity == 0 { + return accuracy + } + + return 1.0 + } + return accuracy + (proximity / float64(dmp.MatchDistance)) +} + +// MatchAlphabet initialises the alphabet for the Bitap algorithm. +func (dmp *DiffMatchPatch) MatchAlphabet(pattern string) map[byte]int { + s := map[byte]int{} + charPattern := []byte(pattern) + for _, c := range charPattern { + _, ok := s[c] + if !ok { + s[c] = 0 + } + } + i := 0 + + for _, c := range charPattern { + value := s[c] | int(uint(1)< y { + return x + } + return y +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go new file mode 100644 index 0000000000..116c043481 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go @@ -0,0 +1,556 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "bytes" + "errors" + "math" + "net/url" + "regexp" + "strconv" + "strings" +) + +// Patch represents one patch operation. +type Patch struct { + diffs []Diff + start1 int + start2 int + length1 int + length2 int +} + +// String emulates GNU diff's format. +// Header: @@ -382,8 +481,9 @@ +// Indicies are printed as 1-based, not 0-based. +func (p *Patch) String() string { + var coords1, coords2 string + + if p.length1 == 0 { + coords1 = strconv.Itoa(p.start1) + ",0" + } else if p.length1 == 1 { + coords1 = strconv.Itoa(p.start1 + 1) + } else { + coords1 = strconv.Itoa(p.start1+1) + "," + strconv.Itoa(p.length1) + } + + if p.length2 == 0 { + coords2 = strconv.Itoa(p.start2) + ",0" + } else if p.length2 == 1 { + coords2 = strconv.Itoa(p.start2 + 1) + } else { + coords2 = strconv.Itoa(p.start2+1) + "," + strconv.Itoa(p.length2) + } + + var text bytes.Buffer + _, _ = text.WriteString("@@ -" + coords1 + " +" + coords2 + " @@\n") + + // Escape the body of the patch with %xx notation. 
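matchBitapScore, above, combines two penalties: the error rate of a candidate match and how far it sits from the expected location, scaled by MatchDistance. A standalone restatement of the formula; bitapScore is an illustrative name, not the vendored function.

package main

import (
	"fmt"
	"math"
)

// bitapScore mirrors matchBitapScore: e errors in a pattern of length patLen,
// found at x while expected at loc, with matchDistance controlling how quickly
// far-away matches are penalised.
func bitapScore(e, x, loc, patLen, matchDistance int) float64 {
	accuracy := float64(e) / float64(patLen)
	proximity := math.Abs(float64(loc - x))
	if matchDistance == 0 {
		if proximity == 0 {
			return accuracy
		}
		return 1.0
	}
	return accuracy + proximity/float64(matchDistance)
}

func main() {
	// One error in a 6-char pattern, found 30 chars from the expected
	// location, with the default MatchDistance of 1000.
	fmt.Println(bitapScore(1, 130, 100, 6, 1000)) // ~0.1967
}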
+ for _, aDiff := range p.diffs { + switch aDiff.Type { + case DiffInsert: + _, _ = text.WriteString("+") + case DiffDelete: + _, _ = text.WriteString("-") + case DiffEqual: + _, _ = text.WriteString(" ") + } + + _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) + _, _ = text.WriteString("\n") + } + + return unescaper.Replace(text.String()) +} + +// PatchAddContext increases the context until it is unique, but doesn't let the pattern expand beyond MatchMaxBits. +func (dmp *DiffMatchPatch) PatchAddContext(patch Patch, text string) Patch { + if len(text) == 0 { + return patch + } + + pattern := text[patch.start2 : patch.start2+patch.length1] + padding := 0 + + // Look for the first and last matches of pattern in text. If two different matches are found, increase the pattern length. + for strings.Index(text, pattern) != strings.LastIndex(text, pattern) && + len(pattern) < dmp.MatchMaxBits-2*dmp.PatchMargin { + padding += dmp.PatchMargin + maxStart := max(0, patch.start2-padding) + minEnd := min(len(text), patch.start2+patch.length1+padding) + pattern = text[maxStart:minEnd] + } + // Add one chunk for good luck. + padding += dmp.PatchMargin + + // Add the prefix. + prefix := text[max(0, patch.start2-padding):patch.start2] + if len(prefix) != 0 { + patch.diffs = append([]Diff{Diff{DiffEqual, prefix}}, patch.diffs...) + } + // Add the suffix. + suffix := text[patch.start2+patch.length1 : min(len(text), patch.start2+patch.length1+padding)] + if len(suffix) != 0 { + patch.diffs = append(patch.diffs, Diff{DiffEqual, suffix}) + } + + // Roll back the start points. + patch.start1 -= len(prefix) + patch.start2 -= len(prefix) + // Extend the lengths. + patch.length1 += len(prefix) + len(suffix) + patch.length2 += len(prefix) + len(suffix) + + return patch +} + +// PatchMake computes a list of patches. +func (dmp *DiffMatchPatch) PatchMake(opt ...interface{}) []Patch { + if len(opt) == 1 { + diffs, _ := opt[0].([]Diff) + text1 := dmp.DiffText1(diffs) + return dmp.PatchMake(text1, diffs) + } else if len(opt) == 2 { + text1 := opt[0].(string) + switch t := opt[1].(type) { + case string: + diffs := dmp.DiffMain(text1, t, true) + if len(diffs) > 2 { + diffs = dmp.DiffCleanupSemantic(diffs) + diffs = dmp.DiffCleanupEfficiency(diffs) + } + return dmp.PatchMake(text1, diffs) + case []Diff: + return dmp.patchMake2(text1, t) + } + } else if len(opt) == 3 { + return dmp.PatchMake(opt[0], opt[2]) + } + return []Patch{} +} + +// patchMake2 computes a list of patches to turn text1 into text2. +// text2 is not provided, diffs are the delta between text1 and text2. +func (dmp *DiffMatchPatch) patchMake2(text1 string, diffs []Diff) []Patch { + // Check for null inputs not needed since null can't be passed in C#. + patches := []Patch{} + if len(diffs) == 0 { + return patches // Get rid of the null case. + } + + patch := Patch{} + charCount1 := 0 // Number of characters into the text1 string. + charCount2 := 0 // Number of characters into the text2 string. + // Start with text1 (prepatchText) and apply the diffs until we arrive at text2 (postpatchText). We recreate the patches one by one to determine context info. + prepatchText := text1 + postpatchText := text1 + + for i, aDiff := range diffs { + if len(patch.diffs) == 0 && aDiff.Type != DiffEqual { + // A new patch starts here. 
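PatchMake accepts a diff list, a pair of texts, or text1 plus diffs; PatchAddContext then pads each patch with up to PatchMargin characters of unique context so the hunk can be located later. A short example that also shows the GNU-diff-style text form produced by Patch.String via PatchToText.

package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	text1 := "The quick brown fox jumps over the lazy dog."
	text2 := "The quick red fox leaps over the lazy dog."

	// The two-text form diffs and cleans up internally; PatchMake also
	// accepts ([]Diff) or (text1, []Diff).
	patches := dmp.PatchMake(text1, text2)

	// "@@ -start,len +start,len @@" hunks with %xx-escaped bodies.
	fmt.Println(dmp.PatchToText(patches))
}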
+ patch.start1 = charCount1 + patch.start2 = charCount2 + } + + switch aDiff.Type { + case DiffInsert: + patch.diffs = append(patch.diffs, aDiff) + patch.length2 += len(aDiff.Text) + postpatchText = postpatchText[:charCount2] + + aDiff.Text + postpatchText[charCount2:] + case DiffDelete: + patch.length1 += len(aDiff.Text) + patch.diffs = append(patch.diffs, aDiff) + postpatchText = postpatchText[:charCount2] + postpatchText[charCount2+len(aDiff.Text):] + case DiffEqual: + if len(aDiff.Text) <= 2*dmp.PatchMargin && + len(patch.diffs) != 0 && i != len(diffs)-1 { + // Small equality inside a patch. + patch.diffs = append(patch.diffs, aDiff) + patch.length1 += len(aDiff.Text) + patch.length2 += len(aDiff.Text) + } + if len(aDiff.Text) >= 2*dmp.PatchMargin { + // Time for a new patch. + if len(patch.diffs) != 0 { + patch = dmp.PatchAddContext(patch, prepatchText) + patches = append(patches, patch) + patch = Patch{} + // Unlike Unidiff, our patch lists have a rolling context. http://code.google.com/p/google-diff-match-patch/wiki/Unidiff Update prepatch text & pos to reflect the application of the just completed patch. + prepatchText = postpatchText + charCount1 = charCount2 + } + } + } + + // Update the current character count. + if aDiff.Type != DiffInsert { + charCount1 += len(aDiff.Text) + } + if aDiff.Type != DiffDelete { + charCount2 += len(aDiff.Text) + } + } + + // Pick up the leftover patch if not empty. + if len(patch.diffs) != 0 { + patch = dmp.PatchAddContext(patch, prepatchText) + patches = append(patches, patch) + } + + return patches +} + +// PatchDeepCopy returns an array that is identical to a given an array of patches. +func (dmp *DiffMatchPatch) PatchDeepCopy(patches []Patch) []Patch { + patchesCopy := []Patch{} + for _, aPatch := range patches { + patchCopy := Patch{} + for _, aDiff := range aPatch.diffs { + patchCopy.diffs = append(patchCopy.diffs, Diff{ + aDiff.Type, + aDiff.Text, + }) + } + patchCopy.start1 = aPatch.start1 + patchCopy.start2 = aPatch.start2 + patchCopy.length1 = aPatch.length1 + patchCopy.length2 = aPatch.length2 + patchesCopy = append(patchesCopy, patchCopy) + } + return patchesCopy +} + +// PatchApply merges a set of patches onto the text. Returns a patched text, as well as an array of true/false values indicating which patches were applied. +func (dmp *DiffMatchPatch) PatchApply(patches []Patch, text string) (string, []bool) { + if len(patches) == 0 { + return text, []bool{} + } + + // Deep copy the patches so that no changes are made to originals. + patches = dmp.PatchDeepCopy(patches) + + nullPadding := dmp.PatchAddPadding(patches) + text = nullPadding + text + nullPadding + patches = dmp.PatchSplitMax(patches) + + x := 0 + // delta keeps track of the offset between the expected and actual location of the previous patch. If there are patches expected at positions 10 and 20, but the first patch was found at 12, delta is 2 and the second patch has an effective expected position of 22. + delta := 0 + results := make([]bool, len(patches)) + for _, aPatch := range patches { + expectedLoc := aPatch.start2 + delta + text1 := dmp.DiffText1(aPatch.diffs) + var startLoc int + endLoc := -1 + if len(text1) > dmp.MatchMaxBits { + // PatchSplitMax will only provide an oversized pattern in the case of a monster delete. 
+ startLoc = dmp.MatchMain(text, text1[:dmp.MatchMaxBits], expectedLoc) + if startLoc != -1 { + endLoc = dmp.MatchMain(text, + text1[len(text1)-dmp.MatchMaxBits:], expectedLoc+len(text1)-dmp.MatchMaxBits) + if endLoc == -1 || startLoc >= endLoc { + // Can't find valid trailing context. Drop this patch. + startLoc = -1 + } + } + } else { + startLoc = dmp.MatchMain(text, text1, expectedLoc) + } + if startLoc == -1 { + // No match found. :( + results[x] = false + // Subtract the delta for this failed patch from subsequent patches. + delta -= aPatch.length2 - aPatch.length1 + } else { + // Found a match. :) + results[x] = true + delta = startLoc - expectedLoc + var text2 string + if endLoc == -1 { + text2 = text[startLoc:int(math.Min(float64(startLoc+len(text1)), float64(len(text))))] + } else { + text2 = text[startLoc:int(math.Min(float64(endLoc+dmp.MatchMaxBits), float64(len(text))))] + } + if text1 == text2 { + // Perfect match, just shove the Replacement text in. + text = text[:startLoc] + dmp.DiffText2(aPatch.diffs) + text[startLoc+len(text1):] + } else { + // Imperfect match. Run a diff to get a framework of equivalent indices. + diffs := dmp.DiffMain(text1, text2, false) + if len(text1) > dmp.MatchMaxBits && float64(dmp.DiffLevenshtein(diffs))/float64(len(text1)) > dmp.PatchDeleteThreshold { + // The end points match, but the content is unacceptably bad. + results[x] = false + } else { + diffs = dmp.DiffCleanupSemanticLossless(diffs) + index1 := 0 + for _, aDiff := range aPatch.diffs { + if aDiff.Type != DiffEqual { + index2 := dmp.DiffXIndex(diffs, index1) + if aDiff.Type == DiffInsert { + // Insertion + text = text[:startLoc+index2] + aDiff.Text + text[startLoc+index2:] + } else if aDiff.Type == DiffDelete { + // Deletion + startIndex := startLoc + index2 + text = text[:startIndex] + + text[startIndex+dmp.DiffXIndex(diffs, index1+len(aDiff.Text))-index2:] + } + } + if aDiff.Type != DiffDelete { + index1 += len(aDiff.Text) + } + } + } + } + } + x++ + } + // Strip the padding off. + text = text[len(nullPadding) : len(nullPadding)+(len(text)-2*len(nullPadding))] + return text, results +} + +// PatchAddPadding adds some padding on text start and end so that edges can match something. +// Intended to be called only from within patchApply. +func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string { + paddingLength := dmp.PatchMargin + nullPadding := "" + for x := 1; x <= paddingLength; x++ { + nullPadding += string(x) + } + + // Bump all the patches forward. + for i := range patches { + patches[i].start1 += paddingLength + patches[i].start2 += paddingLength + } + + // Add some padding on start of first diff. + if len(patches[0].diffs) == 0 || patches[0].diffs[0].Type != DiffEqual { + // Add nullPadding equality. + patches[0].diffs = append([]Diff{Diff{DiffEqual, nullPadding}}, patches[0].diffs...) + patches[0].start1 -= paddingLength // Should be 0. + patches[0].start2 -= paddingLength // Should be 0. + patches[0].length1 += paddingLength + patches[0].length2 += paddingLength + } else if paddingLength > len(patches[0].diffs[0].Text) { + // Grow first equality. + extraLength := paddingLength - len(patches[0].diffs[0].Text) + patches[0].diffs[0].Text = nullPadding[len(patches[0].diffs[0].Text):] + patches[0].diffs[0].Text + patches[0].start1 -= extraLength + patches[0].start2 -= extraLength + patches[0].length1 += extraLength + patches[0].length2 += extraLength + } + + // Add some padding on end of last diff. 
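PatchApply, above, deep-copies the patches, pads the target text, and then uses MatchMain to locate each hunk even when the target has drifted from the original, reporting per-patch success. A hedged usage sketch.

package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	base := "The quick brown fox jumps over the lazy dog."
	edited := "The quick red fox jumps over the lazy dog."
	patches := dmp.PatchMake(base, edited)

	// Apply the same patches to a text that has drifted from the original;
	// the fuzzy match machinery relocates each hunk as long as it stays
	// within the configured MatchThreshold / MatchDistance.
	drifted := "Yesterday, the quick brown fox jumps over the lazy dog."
	result, applied := dmp.PatchApply(patches, drifted)

	fmt.Println(result)  // the drifted text with "brown" replaced by "red"
	fmt.Println(applied) // one bool per patch, e.g. [true]
}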
+ last := len(patches) - 1 + if len(patches[last].diffs) == 0 || patches[last].diffs[len(patches[last].diffs)-1].Type != DiffEqual { + // Add nullPadding equality. + patches[last].diffs = append(patches[last].diffs, Diff{DiffEqual, nullPadding}) + patches[last].length1 += paddingLength + patches[last].length2 += paddingLength + } else if paddingLength > len(patches[last].diffs[len(patches[last].diffs)-1].Text) { + // Grow last equality. + lastDiff := patches[last].diffs[len(patches[last].diffs)-1] + extraLength := paddingLength - len(lastDiff.Text) + patches[last].diffs[len(patches[last].diffs)-1].Text += nullPadding[:extraLength] + patches[last].length1 += extraLength + patches[last].length2 += extraLength + } + + return nullPadding +} + +// PatchSplitMax looks through the patches and breaks up any which are longer than the maximum limit of the match algorithm. +// Intended to be called only from within patchApply. +func (dmp *DiffMatchPatch) PatchSplitMax(patches []Patch) []Patch { + patchSize := dmp.MatchMaxBits + for x := 0; x < len(patches); x++ { + if patches[x].length1 <= patchSize { + continue + } + bigpatch := patches[x] + // Remove the big old patch. + patches = append(patches[:x], patches[x+1:]...) + x-- + + start1 := bigpatch.start1 + start2 := bigpatch.start2 + precontext := "" + for len(bigpatch.diffs) != 0 { + // Create one of several smaller patches. + patch := Patch{} + empty := true + patch.start1 = start1 - len(precontext) + patch.start2 = start2 - len(precontext) + if len(precontext) != 0 { + patch.length1 = len(precontext) + patch.length2 = len(precontext) + patch.diffs = append(patch.diffs, Diff{DiffEqual, precontext}) + } + for len(bigpatch.diffs) != 0 && patch.length1 < patchSize-dmp.PatchMargin { + diffType := bigpatch.diffs[0].Type + diffText := bigpatch.diffs[0].Text + if diffType == DiffInsert { + // Insertions are harmless. + patch.length2 += len(diffText) + start2 += len(diffText) + patch.diffs = append(patch.diffs, bigpatch.diffs[0]) + bigpatch.diffs = bigpatch.diffs[1:] + empty = false + } else if diffType == DiffDelete && len(patch.diffs) == 1 && patch.diffs[0].Type == DiffEqual && len(diffText) > 2*patchSize { + // This is a large deletion. Let it pass in one chunk. + patch.length1 += len(diffText) + start1 += len(diffText) + empty = false + patch.diffs = append(patch.diffs, Diff{diffType, diffText}) + bigpatch.diffs = bigpatch.diffs[1:] + } else { + // Deletion or equality. Only take as much as we can stomach. + diffText = diffText[:min(len(diffText), patchSize-patch.length1-dmp.PatchMargin)] + + patch.length1 += len(diffText) + start1 += len(diffText) + if diffType == DiffEqual { + patch.length2 += len(diffText) + start2 += len(diffText) + } else { + empty = false + } + patch.diffs = append(patch.diffs, Diff{diffType, diffText}) + if diffText == bigpatch.diffs[0].Text { + bigpatch.diffs = bigpatch.diffs[1:] + } else { + bigpatch.diffs[0].Text = + bigpatch.diffs[0].Text[len(diffText):] + } + } + } + // Compute the head context for the next patch. + precontext = dmp.DiffText2(patch.diffs) + precontext = precontext[max(0, len(precontext)-dmp.PatchMargin):] + + postcontext := "" + // Append the end context for this patch. 
+ if len(dmp.DiffText1(bigpatch.diffs)) > dmp.PatchMargin { + postcontext = dmp.DiffText1(bigpatch.diffs)[:dmp.PatchMargin] + } else { + postcontext = dmp.DiffText1(bigpatch.diffs) + } + + if len(postcontext) != 0 { + patch.length1 += len(postcontext) + patch.length2 += len(postcontext) + if len(patch.diffs) != 0 && patch.diffs[len(patch.diffs)-1].Type == DiffEqual { + patch.diffs[len(patch.diffs)-1].Text += postcontext + } else { + patch.diffs = append(patch.diffs, Diff{DiffEqual, postcontext}) + } + } + if !empty { + x++ + patches = append(patches[:x], append([]Patch{patch}, patches[x:]...)...) + } + } + } + return patches +} + +// PatchToText takes a list of patches and returns a textual representation. +func (dmp *DiffMatchPatch) PatchToText(patches []Patch) string { + var text bytes.Buffer + for _, aPatch := range patches { + _, _ = text.WriteString(aPatch.String()) + } + return text.String() +} + +// PatchFromText parses a textual representation of patches and returns a List of Patch objects. +func (dmp *DiffMatchPatch) PatchFromText(textline string) ([]Patch, error) { + patches := []Patch{} + if len(textline) == 0 { + return patches, nil + } + text := strings.Split(textline, "\n") + textPointer := 0 + patchHeader := regexp.MustCompile("^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$") + + var patch Patch + var sign uint8 + var line string + for textPointer < len(text) { + + if !patchHeader.MatchString(text[textPointer]) { + return patches, errors.New("Invalid patch string: " + text[textPointer]) + } + + patch = Patch{} + m := patchHeader.FindStringSubmatch(text[textPointer]) + + patch.start1, _ = strconv.Atoi(m[1]) + if len(m[2]) == 0 { + patch.start1-- + patch.length1 = 1 + } else if m[2] == "0" { + patch.length1 = 0 + } else { + patch.start1-- + patch.length1, _ = strconv.Atoi(m[2]) + } + + patch.start2, _ = strconv.Atoi(m[3]) + + if len(m[4]) == 0 { + patch.start2-- + patch.length2 = 1 + } else if m[4] == "0" { + patch.length2 = 0 + } else { + patch.start2-- + patch.length2, _ = strconv.Atoi(m[4]) + } + textPointer++ + + for textPointer < len(text) { + if len(text[textPointer]) > 0 { + sign = text[textPointer][0] + } else { + textPointer++ + continue + } + + line = text[textPointer][1:] + line = strings.Replace(line, "+", "%2b", -1) + line, _ = url.QueryUnescape(line) + if sign == '-' { + // Deletion. + patch.diffs = append(patch.diffs, Diff{DiffDelete, line}) + } else if sign == '+' { + // Insertion. + patch.diffs = append(patch.diffs, Diff{DiffInsert, line}) + } else if sign == ' ' { + // Minor equality. + patch.diffs = append(patch.diffs, Diff{DiffEqual, line}) + } else if sign == '@' { + // Start of next patch. + break + } else { + // WTF? + return patches, errors.New("Invalid patch mode '" + string(sign) + "' in: " + string(line)) + } + textPointer++ + } + + patches = append(patches, patch) + } + return patches, nil +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go new file mode 100644 index 0000000000..265f29cc7e --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go @@ -0,0 +1,88 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. 
+// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "strings" + "unicode/utf8" +) + +// unescaper unescapes selected chars for compatibility with JavaScript's encodeURI. +// In speed critical applications this could be dropped since the receiving application will certainly decode these fine. Note that this function is case-sensitive. Thus "%3F" would not be unescaped. But this is ok because it is only called with the output of HttpUtility.UrlEncode which returns lowercase hex. Example: "%3f" -> "?", "%24" -> "$", etc. +var unescaper = strings.NewReplacer( + "%21", "!", "%7E", "~", "%27", "'", + "%28", "(", "%29", ")", "%3B", ";", + "%2F", "/", "%3F", "?", "%3A", ":", + "%40", "@", "%26", "&", "%3D", "=", + "%2B", "+", "%24", "$", "%2C", ",", "%23", "#", "%2A", "*") + +// indexOf returns the first index of pattern in str, starting at str[i]. +func indexOf(str string, pattern string, i int) int { + if i > len(str)-1 { + return -1 + } + if i <= 0 { + return strings.Index(str, pattern) + } + ind := strings.Index(str[i:], pattern) + if ind == -1 { + return -1 + } + return ind + i +} + +// lastIndexOf returns the last index of pattern in str, starting at str[i]. +func lastIndexOf(str string, pattern string, i int) int { + if i < 0 { + return -1 + } + if i >= len(str) { + return strings.LastIndex(str, pattern) + } + _, size := utf8.DecodeRuneInString(str[i:]) + return strings.LastIndex(str[:i+size], pattern) +} + +// runesIndexOf returns the index of pattern in target, starting at target[i]. +func runesIndexOf(target, pattern []rune, i int) int { + if i > len(target)-1 { + return -1 + } + if i <= 0 { + return runesIndex(target, pattern) + } + ind := runesIndex(target[i:], pattern) + if ind == -1 { + return -1 + } + return ind + i +} + +func runesEqual(r1, r2 []rune) bool { + if len(r1) != len(r2) { + return false + } + for i, c := range r1 { + if c != r2[i] { + return false + } + } + return true +} + +// runesIndex is the equivalent of strings.Index for rune slices. +func runesIndex(r1, r2 []rune) int { + last := len(r1) - len(r2) + for i := 0; i <= last; i++ { + if runesEqual(r1[i:i+len(r2)], r2) { + return i + } + } + return -1 +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 943a1e5f05..4e79df58ab 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -75,10 +75,10 @@ "revisionTime": "2016-12-06T10:26:25Z" }, { - "checksumSHA1": "jqSVRDK7dGg6E/NikVq1Kw6gdbA=", + "checksumSHA1": "dvabztWVQX8f6oMLRyv4dLH+TGY=", "path": "github.com/davecgh/go-spew/spew", - "revision": "2df174808ee097f90d259e432cc04442cf60be21", - "revisionTime": "2015-06-18T03:34:22Z" + "revision": "346938d642f2ec3594ed81d874461961cd0faa76", + "revisionTime": "2016-10-29T20:57:26Z" }, { "checksumSHA1": "vwGGesK6k8uQgnwew609k9Jvcb8=", @@ -332,6 +332,12 @@ "revision": "ed27b6fd65218132ee50cd95f38474a3d8a2cd12", "revisionTime": "2016-06-18T19:32:21Z" }, + { + "checksumSHA1": "v7C+aJ1D/z3MEeCte6bxvpoGjM4=", + "path": "github.com/sergi/go-diff/diffmatchpatch", + "revision": "feef008d51ad2b3778f85d387ccf91735543008d", + "revisionTime": "2017-04-09T07:17:39Z" + }, { "checksumSHA1": "r8yq1UeCPjRb4GO2NyHqEyFxb1w=", "path": "github.com/smartystreets/assertions",
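The vendor.json entries above pin the new github.com/sergi/go-diff/diffmatchpatch dependency (and bump go-spew). One note on the vendored stringutil.go helpers: they exist because Go string indexing is byte-based while much of the diff code works in runes, so runesIndex and friends keep positions in code points. A self-contained illustration of the byte-vs-rune offset difference; runeIndex below is a demo helper, not part of the package.

package main

import (
	"fmt"
	"strings"
)

// runeIndex returns the rune offset of sub in s, or -1. It is comparable in
// spirit to runesIndex above, but written against plain strings for the demo.
func runeIndex(s, sub string) int {
	byteIdx := strings.Index(s, sub)
	if byteIdx == -1 {
		return -1
	}
	return len([]rune(s[:byteIdx]))
}

func main() {
	s := "héllo wörld"
	fmt.Println(strings.Index(s, "wörld")) // 7: byte offset (é is 2 bytes in UTF-8)
	fmt.Println(runeIndex(s, "wörld"))     // 6: rune offset
}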